]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-2.6.32.58-201203062047.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.58-201203062047.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index ed78982..bcc432e 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,48 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
243 +endif
244 +ifdef CONFIG_CHECKER_PLUGIN
245 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
246 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
247 +endif
248 +endif
249 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
250 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
251 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
252 +ifeq ($(KBUILD_EXTMOD),)
253 +gcc-plugins:
254 + $(Q)$(MAKE) $(build)=tools/gcc
255 +else
256 +gcc-plugins: ;
257 +endif
258 +else
259 +gcc-plugins:
260 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
261 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
262 +else
263 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
264 +endif
265 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
266 +endif
267 +endif
268 +
269 include $(srctree)/arch/$(SRCARCH)/Makefile
270
271 ifneq ($(CONFIG_FRAME_WARN),0)
272 @@ -647,7 +690,7 @@ export mod_strip_cmd
273
274
275 ifeq ($(KBUILD_EXTMOD),)
276 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
277 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
278
279 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
280 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
281 @@ -868,6 +911,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
282
283 # The actual objects are generated when descending,
284 # make sure no implicit rule kicks in
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
286 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
287 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288
289 # Handle descending into subdirectories listed in $(vmlinux-dirs)
290 @@ -877,7 +922,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
291 # Error messages still appears in the original language
292
293 PHONY += $(vmlinux-dirs)
294 -$(vmlinux-dirs): prepare scripts
295 +$(vmlinux-dirs): gcc-plugins prepare scripts
296 $(Q)$(MAKE) $(build)=$@
297
298 # Build the kernel release string
299 @@ -986,6 +1031,7 @@ prepare0: archprepare FORCE
300 $(Q)$(MAKE) $(build)=. missing-syscalls
301
302 # All the preparing..
303 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
304 prepare: prepare0
305
306 # The asm symlink changes when $(ARCH) changes.
307 @@ -1127,6 +1173,8 @@ all: modules
308 # using awk while concatenating to the final file.
309
310 PHONY += modules
311 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
312 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
315 @$(kecho) ' Building modules, stage 2.';
316 @@ -1136,7 +1184,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
317
318 # Target to prepare building external modules
319 PHONY += modules_prepare
320 -modules_prepare: prepare scripts
321 +modules_prepare: gcc-plugins prepare scripts
322
323 # Target to install modules
324 PHONY += modules_install
325 @@ -1201,7 +1249,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
326 include/linux/autoconf.h include/linux/version.h \
327 include/linux/utsrelease.h \
328 include/linux/bounds.h include/asm*/asm-offsets.h \
329 - Module.symvers Module.markers tags TAGS cscope*
330 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
331
332 # clean - Delete most, but leave enough to build external modules
333 #
334 @@ -1245,7 +1293,7 @@ distclean: mrproper
335 @find $(srctree) $(RCS_FIND_IGNORE) \
336 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
337 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
338 - -o -name '.*.rej' -o -size 0 \
339 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
340 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
341 -type f -print | xargs rm -f
342
343 @@ -1292,6 +1340,7 @@ help:
344 @echo ' modules_prepare - Set up for building external modules'
345 @echo ' tags/TAGS - Generate tags file for editors'
346 @echo ' cscope - Generate cscope index'
347 + @echo ' gtags - Generate GNU GLOBAL index'
348 @echo ' kernelrelease - Output the release version string'
349 @echo ' kernelversion - Output the version stored in Makefile'
350 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
351 @@ -1393,6 +1442,8 @@ PHONY += $(module-dirs) modules
352 $(module-dirs): crmodverdir $(objtree)/Module.symvers
353 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
354
355 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 modules: $(module-dirs)
358 @$(kecho) ' Building modules, stage 2.';
359 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
360 @@ -1448,7 +1499,7 @@ endif # KBUILD_EXTMOD
361 quiet_cmd_tags = GEN $@
362 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
363
364 -tags TAGS cscope: FORCE
365 +tags TAGS cscope gtags: FORCE
366 $(call cmd,tags)
367
368 # Scripts to check various things for consistency
369 @@ -1513,17 +1564,21 @@ else
370 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
371 endif
372
373 -%.s: %.c prepare scripts FORCE
374 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
375 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
376 +%.s: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.i: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.o: %.c prepare scripts FORCE
381 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%.o: %.c gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.lst: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387 -%.s: %.S prepare scripts FORCE
388 +%.s: %.S gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 -%.o: %.S prepare scripts FORCE
391 +%.o: %.S gcc-plugins prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 %.symtypes: %.c prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 @@ -1533,11 +1588,15 @@ endif
396 $(cmd_crmodverdir)
397 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
398 $(build)=$(build-dir)
399 -%/: prepare scripts FORCE
400 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 +%/: gcc-plugins prepare scripts FORCE
403 $(cmd_crmodverdir)
404 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
405 $(build)=$(build-dir)
406 -%.ko: prepare scripts FORCE
407 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409 +%.ko: gcc-plugins prepare scripts FORCE
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir) $(@:.ko=.o)
413 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
414 index 610dff4..f396854 100644
415 --- a/arch/alpha/include/asm/atomic.h
416 +++ b/arch/alpha/include/asm/atomic.h
417 @@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
418 #define atomic_dec(v) atomic_sub(1,(v))
419 #define atomic64_dec(v) atomic64_sub(1,(v))
420
421 +#define atomic64_read_unchecked(v) atomic64_read(v)
422 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
423 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
424 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
425 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
426 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
427 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
428 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
429 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
430 +
431 #define smp_mb__before_atomic_dec() smp_mb()
432 #define smp_mb__after_atomic_dec() smp_mb()
433 #define smp_mb__before_atomic_inc() smp_mb()
434 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
435 index 5c75c1b..c82f878 100644
436 --- a/arch/alpha/include/asm/elf.h
437 +++ b/arch/alpha/include/asm/elf.h
438 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
439
440 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
441
442 +#ifdef CONFIG_PAX_ASLR
443 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
444 +
445 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
446 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
447 +#endif
448 +
449 /* $0 is set by ld.so to a pointer to a function which might be
450 registered using atexit. This provides a mean for the dynamic
451 linker to call DT_FINI functions for shared libraries that have
452 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
453 index 3f0c59f..cf1e100 100644
454 --- a/arch/alpha/include/asm/pgtable.h
455 +++ b/arch/alpha/include/asm/pgtable.h
456 @@ -101,6 +101,17 @@ struct vm_area_struct;
457 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
458 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
459 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
460 +
461 +#ifdef CONFIG_PAX_PAGEEXEC
462 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
463 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
464 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
465 +#else
466 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
467 +# define PAGE_COPY_NOEXEC PAGE_COPY
468 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
469 +#endif
470 +
471 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
472
473 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
474 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
475 index ebc3c89..20cfa63 100644
476 --- a/arch/alpha/kernel/module.c
477 +++ b/arch/alpha/kernel/module.c
478 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
479
480 /* The small sections were sorted to the end of the segment.
481 The following should definitely cover them. */
482 - gp = (u64)me->module_core + me->core_size - 0x8000;
483 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
484 got = sechdrs[me->arch.gotsecindex].sh_addr;
485
486 for (i = 0; i < n; i++) {
487 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
488 index a94e49c..d71dd44 100644
489 --- a/arch/alpha/kernel/osf_sys.c
490 +++ b/arch/alpha/kernel/osf_sys.c
491 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
492 /* At this point: (!vma || addr < vma->vm_end). */
493 if (limit - len < addr)
494 return -ENOMEM;
495 - if (!vma || addr + len <= vma->vm_start)
496 + if (check_heap_stack_gap(vma, addr, len))
497 return addr;
498 addr = vma->vm_end;
499 vma = vma->vm_next;
500 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
501 merely specific addresses, but regions of memory -- perhaps
502 this feature should be incorporated into all ports? */
503
504 +#ifdef CONFIG_PAX_RANDMMAP
505 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
506 +#endif
507 +
508 if (addr) {
509 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
510 if (addr != (unsigned long) -ENOMEM)
511 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
512 }
513
514 /* Next, try allocating at TASK_UNMAPPED_BASE. */
515 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
516 - len, limit);
517 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
518 +
519 if (addr != (unsigned long) -ENOMEM)
520 return addr;
521
522 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
523 index 00a31de..2ded0f2 100644
524 --- a/arch/alpha/mm/fault.c
525 +++ b/arch/alpha/mm/fault.c
526 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
527 __reload_thread(pcb);
528 }
529
530 +#ifdef CONFIG_PAX_PAGEEXEC
531 +/*
532 + * PaX: decide what to do with offenders (regs->pc = fault address)
533 + *
534 + * returns 1 when task should be killed
535 + * 2 when patched PLT trampoline was detected
536 + * 3 when unpatched PLT trampoline was detected
537 + */
538 +static int pax_handle_fetch_fault(struct pt_regs *regs)
539 +{
540 +
541 +#ifdef CONFIG_PAX_EMUPLT
542 + int err;
543 +
544 + do { /* PaX: patched PLT emulation #1 */
545 + unsigned int ldah, ldq, jmp;
546 +
547 + err = get_user(ldah, (unsigned int *)regs->pc);
548 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
549 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
550 +
551 + if (err)
552 + break;
553 +
554 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
555 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
556 + jmp == 0x6BFB0000U)
557 + {
558 + unsigned long r27, addr;
559 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
560 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
561 +
562 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
563 + err = get_user(r27, (unsigned long *)addr);
564 + if (err)
565 + break;
566 +
567 + regs->r27 = r27;
568 + regs->pc = r27;
569 + return 2;
570 + }
571 + } while (0);
572 +
573 + do { /* PaX: patched PLT emulation #2 */
574 + unsigned int ldah, lda, br;
575 +
576 + err = get_user(ldah, (unsigned int *)regs->pc);
577 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
578 + err |= get_user(br, (unsigned int *)(regs->pc+8));
579 +
580 + if (err)
581 + break;
582 +
583 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
584 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
585 + (br & 0xFFE00000U) == 0xC3E00000U)
586 + {
587 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
588 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
589 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
590 +
591 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
592 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
593 + return 2;
594 + }
595 + } while (0);
596 +
597 + do { /* PaX: unpatched PLT emulation */
598 + unsigned int br;
599 +
600 + err = get_user(br, (unsigned int *)regs->pc);
601 +
602 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
603 + unsigned int br2, ldq, nop, jmp;
604 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
605 +
606 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
607 + err = get_user(br2, (unsigned int *)addr);
608 + err |= get_user(ldq, (unsigned int *)(addr+4));
609 + err |= get_user(nop, (unsigned int *)(addr+8));
610 + err |= get_user(jmp, (unsigned int *)(addr+12));
611 + err |= get_user(resolver, (unsigned long *)(addr+16));
612 +
613 + if (err)
614 + break;
615 +
616 + if (br2 == 0xC3600000U &&
617 + ldq == 0xA77B000CU &&
618 + nop == 0x47FF041FU &&
619 + jmp == 0x6B7B0000U)
620 + {
621 + regs->r28 = regs->pc+4;
622 + regs->r27 = addr+16;
623 + regs->pc = resolver;
624 + return 3;
625 + }
626 + }
627 + } while (0);
628 +#endif
629 +
630 + return 1;
631 +}
632 +
633 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
634 +{
635 + unsigned long i;
636 +
637 + printk(KERN_ERR "PAX: bytes at PC: ");
638 + for (i = 0; i < 5; i++) {
639 + unsigned int c;
640 + if (get_user(c, (unsigned int *)pc+i))
641 + printk(KERN_CONT "???????? ");
642 + else
643 + printk(KERN_CONT "%08x ", c);
644 + }
645 + printk("\n");
646 +}
647 +#endif
648
649 /*
650 * This routine handles page faults. It determines the address,
651 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
652 good_area:
653 si_code = SEGV_ACCERR;
654 if (cause < 0) {
655 - if (!(vma->vm_flags & VM_EXEC))
656 + if (!(vma->vm_flags & VM_EXEC)) {
657 +
658 +#ifdef CONFIG_PAX_PAGEEXEC
659 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
660 + goto bad_area;
661 +
662 + up_read(&mm->mmap_sem);
663 + switch (pax_handle_fetch_fault(regs)) {
664 +
665 +#ifdef CONFIG_PAX_EMUPLT
666 + case 2:
667 + case 3:
668 + return;
669 +#endif
670 +
671 + }
672 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
673 + do_group_exit(SIGKILL);
674 +#else
675 goto bad_area;
676 +#endif
677 +
678 + }
679 } else if (!cause) {
680 /* Allow reads even for write-only mappings */
681 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
682 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
683 index b68faef..6dd1496 100644
684 --- a/arch/arm/Kconfig
685 +++ b/arch/arm/Kconfig
686 @@ -14,6 +14,7 @@ config ARM
687 select SYS_SUPPORTS_APM_EMULATION
688 select HAVE_OPROFILE
689 select HAVE_ARCH_KGDB
690 + select GENERIC_ATOMIC64
691 select HAVE_KPROBES if (!XIP_KERNEL)
692 select HAVE_KRETPROBES if (HAVE_KPROBES)
693 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
694 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
695 index d0daeab..ff286a8 100644
696 --- a/arch/arm/include/asm/atomic.h
697 +++ b/arch/arm/include/asm/atomic.h
698 @@ -15,6 +15,10 @@
699 #include <linux/types.h>
700 #include <asm/system.h>
701
702 +#ifdef CONFIG_GENERIC_ATOMIC64
703 +#include <asm-generic/atomic64.h>
704 +#endif
705 +
706 #define ATOMIC_INIT(i) { (i) }
707
708 #ifdef __KERNEL__
709 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
710 index 6aac3f5..265536b 100644
711 --- a/arch/arm/include/asm/elf.h
712 +++ b/arch/arm/include/asm/elf.h
713 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719 +
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
724 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
725 +#endif
726
727 /* When the program starts, a1 contains a pointer to a function to be
728 registered with atexit, as per the SVR4 ABI. A value of 0 means we
729 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
730 index c019949..388fdd1 100644
731 --- a/arch/arm/include/asm/kmap_types.h
732 +++ b/arch/arm/include/asm/kmap_types.h
733 @@ -19,6 +19,7 @@ enum km_type {
734 KM_SOFTIRQ0,
735 KM_SOFTIRQ1,
736 KM_L2_CACHE,
737 + KM_CLEARPAGE,
738 KM_TYPE_NR
739 };
740
741 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
742 index 1d6bd40..fba0cb9 100644
743 --- a/arch/arm/include/asm/uaccess.h
744 +++ b/arch/arm/include/asm/uaccess.h
745 @@ -22,6 +22,8 @@
746 #define VERIFY_READ 0
747 #define VERIFY_WRITE 1
748
749 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
750 +
751 /*
752 * The exception table consists of pairs of addresses: the first is the
753 * address of an instruction that is allowed to fault, and the second is
754 @@ -387,8 +389,23 @@ do { \
755
756
757 #ifdef CONFIG_MMU
758 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
759 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
760 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
761 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
762 +
763 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
764 +{
765 + if (!__builtin_constant_p(n))
766 + check_object_size(to, n, false);
767 + return ___copy_from_user(to, from, n);
768 +}
769 +
770 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
771 +{
772 + if (!__builtin_constant_p(n))
773 + check_object_size(from, n, true);
774 + return ___copy_to_user(to, from, n);
775 +}
776 +
777 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
778 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
779 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
780 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
781
782 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
783 {
784 + if ((long)n < 0)
785 + return n;
786 +
787 if (access_ok(VERIFY_READ, from, n))
788 n = __copy_from_user(to, from, n);
789 else /* security hole - plug it */
790 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
791
792 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
793 {
794 + if ((long)n < 0)
795 + return n;
796 +
797 if (access_ok(VERIFY_WRITE, to, n))
798 n = __copy_to_user(to, from, n);
799 return n;
800 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
801 index 0e62770..e2c2cd6 100644
802 --- a/arch/arm/kernel/armksyms.c
803 +++ b/arch/arm/kernel/armksyms.c
804 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
805 #ifdef CONFIG_MMU
806 EXPORT_SYMBOL(copy_page);
807
808 -EXPORT_SYMBOL(__copy_from_user);
809 -EXPORT_SYMBOL(__copy_to_user);
810 +EXPORT_SYMBOL(___copy_from_user);
811 +EXPORT_SYMBOL(___copy_to_user);
812 EXPORT_SYMBOL(__clear_user);
813
814 EXPORT_SYMBOL(__get_user_1);
815 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
816 index ba8ccfe..2dc34dc 100644
817 --- a/arch/arm/kernel/kgdb.c
818 +++ b/arch/arm/kernel/kgdb.c
819 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
820 * and we handle the normal undef case within the do_undefinstr
821 * handler.
822 */
823 -struct kgdb_arch arch_kgdb_ops = {
824 +const struct kgdb_arch arch_kgdb_ops = {
825 #ifndef __ARMEB__
826 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
827 #else /* ! __ARMEB__ */
828 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
829 index 3f361a7..6e806e1 100644
830 --- a/arch/arm/kernel/traps.c
831 +++ b/arch/arm/kernel/traps.c
832 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
833
834 DEFINE_SPINLOCK(die_lock);
835
836 +extern void gr_handle_kernel_exploit(void);
837 +
838 /*
839 * This function is protected against re-entrancy.
840 */
841 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
842 if (panic_on_oops)
843 panic("Fatal exception");
844
845 + gr_handle_kernel_exploit();
846 +
847 do_exit(SIGSEGV);
848 }
849
850 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
851 index e4fe124..0fc246b 100644
852 --- a/arch/arm/lib/copy_from_user.S
853 +++ b/arch/arm/lib/copy_from_user.S
854 @@ -16,7 +16,7 @@
855 /*
856 * Prototype:
857 *
858 - * size_t __copy_from_user(void *to, const void *from, size_t n)
859 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
860 *
861 * Purpose:
862 *
863 @@ -84,11 +84,11 @@
864
865 .text
866
867 -ENTRY(__copy_from_user)
868 +ENTRY(___copy_from_user)
869
870 #include "copy_template.S"
871
872 -ENDPROC(__copy_from_user)
873 +ENDPROC(___copy_from_user)
874
875 .section .fixup,"ax"
876 .align 0
877 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
878 index 1a71e15..ac7b258 100644
879 --- a/arch/arm/lib/copy_to_user.S
880 +++ b/arch/arm/lib/copy_to_user.S
881 @@ -16,7 +16,7 @@
882 /*
883 * Prototype:
884 *
885 - * size_t __copy_to_user(void *to, const void *from, size_t n)
886 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
887 *
888 * Purpose:
889 *
890 @@ -88,11 +88,11 @@
891 .text
892
893 ENTRY(__copy_to_user_std)
894 -WEAK(__copy_to_user)
895 +WEAK(___copy_to_user)
896
897 #include "copy_template.S"
898
899 -ENDPROC(__copy_to_user)
900 +ENDPROC(___copy_to_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
905 index ffdd274..91017b6 100644
906 --- a/arch/arm/lib/uaccess.S
907 +++ b/arch/arm/lib/uaccess.S
908 @@ -19,7 +19,7 @@
909
910 #define PAGE_SHIFT 12
911
912 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
913 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
914 * Purpose : copy a block to user memory from kernel memory
915 * Params : to - user memory
916 * : from - kernel memory
917 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
918 sub r2, r2, ip
919 b .Lc2u_dest_aligned
920
921 -ENTRY(__copy_to_user)
922 +ENTRY(___copy_to_user)
923 stmfd sp!, {r2, r4 - r7, lr}
924 cmp r2, #4
925 blt .Lc2u_not_enough
926 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
927 ldrgtb r3, [r1], #0
928 USER( strgtbt r3, [r0], #1) @ May fault
929 b .Lc2u_finished
930 -ENDPROC(__copy_to_user)
931 +ENDPROC(___copy_to_user)
932
933 .section .fixup,"ax"
934 .align 0
935 9001: ldmfd sp!, {r0, r4 - r7, pc}
936 .previous
937
938 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
939 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
940 * Purpose : copy a block from user memory to kernel memory
941 * Params : to - kernel memory
942 * : from - user memory
943 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
944 sub r2, r2, ip
945 b .Lcfu_dest_aligned
946
947 -ENTRY(__copy_from_user)
948 +ENTRY(___copy_from_user)
949 stmfd sp!, {r0, r2, r4 - r7, lr}
950 cmp r2, #4
951 blt .Lcfu_not_enough
952 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
953 USER( ldrgtbt r3, [r1], #1) @ May fault
954 strgtb r3, [r0], #1
955 b .Lcfu_finished
956 -ENDPROC(__copy_from_user)
957 +ENDPROC(___copy_from_user)
958
959 .section .fixup,"ax"
960 .align 0
961 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
962 index 6b967ff..67d5b2b 100644
963 --- a/arch/arm/lib/uaccess_with_memcpy.c
964 +++ b/arch/arm/lib/uaccess_with_memcpy.c
965 @@ -97,7 +97,7 @@ out:
966 }
967
968 unsigned long
969 -__copy_to_user(void __user *to, const void *from, unsigned long n)
970 +___copy_to_user(void __user *to, const void *from, unsigned long n)
971 {
972 /*
973 * This test is stubbed out of the main function above to keep
974 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
975 index 4028724..beec230 100644
976 --- a/arch/arm/mach-at91/pm.c
977 +++ b/arch/arm/mach-at91/pm.c
978 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
979 }
980
981
982 -static struct platform_suspend_ops at91_pm_ops ={
983 +static const struct platform_suspend_ops at91_pm_ops ={
984 .valid = at91_pm_valid_state,
985 .begin = at91_pm_begin,
986 .enter = at91_pm_enter,
987 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
988 index 5218943..0a34552 100644
989 --- a/arch/arm/mach-omap1/pm.c
990 +++ b/arch/arm/mach-omap1/pm.c
991 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
992
993
994
995 -static struct platform_suspend_ops omap_pm_ops ={
996 +static const struct platform_suspend_ops omap_pm_ops ={
997 .prepare = omap_pm_prepare,
998 .enter = omap_pm_enter,
999 .finish = omap_pm_finish,
1000 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1001 index bff5c4e..d4c649b 100644
1002 --- a/arch/arm/mach-omap2/pm24xx.c
1003 +++ b/arch/arm/mach-omap2/pm24xx.c
1004 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1005 enable_hlt();
1006 }
1007
1008 -static struct platform_suspend_ops omap_pm_ops = {
1009 +static const struct platform_suspend_ops omap_pm_ops = {
1010 .prepare = omap2_pm_prepare,
1011 .enter = omap2_pm_enter,
1012 .finish = omap2_pm_finish,
1013 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1014 index 8946319..7d3e661 100644
1015 --- a/arch/arm/mach-omap2/pm34xx.c
1016 +++ b/arch/arm/mach-omap2/pm34xx.c
1017 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1018 return;
1019 }
1020
1021 -static struct platform_suspend_ops omap_pm_ops = {
1022 +static const struct platform_suspend_ops omap_pm_ops = {
1023 .begin = omap3_pm_begin,
1024 .end = omap3_pm_end,
1025 .prepare = omap3_pm_prepare,
1026 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1027 index b3d8d53..6e68ebc 100644
1028 --- a/arch/arm/mach-pnx4008/pm.c
1029 +++ b/arch/arm/mach-pnx4008/pm.c
1030 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1031 (state == PM_SUSPEND_MEM);
1032 }
1033
1034 -static struct platform_suspend_ops pnx4008_pm_ops = {
1035 +static const struct platform_suspend_ops pnx4008_pm_ops = {
1036 .enter = pnx4008_pm_enter,
1037 .valid = pnx4008_pm_valid,
1038 };
1039 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1040 index 7693355..9beb00a 100644
1041 --- a/arch/arm/mach-pxa/pm.c
1042 +++ b/arch/arm/mach-pxa/pm.c
1043 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1044 pxa_cpu_pm_fns->finish();
1045 }
1046
1047 -static struct platform_suspend_ops pxa_pm_ops = {
1048 +static const struct platform_suspend_ops pxa_pm_ops = {
1049 .valid = pxa_pm_valid,
1050 .enter = pxa_pm_enter,
1051 .prepare = pxa_pm_prepare,
1052 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1053 index 629e05d..06be589 100644
1054 --- a/arch/arm/mach-pxa/sharpsl_pm.c
1055 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
1056 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1057 }
1058
1059 #ifdef CONFIG_PM
1060 -static struct platform_suspend_ops sharpsl_pm_ops = {
1061 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1062 .prepare = pxa_pm_prepare,
1063 .finish = pxa_pm_finish,
1064 .enter = corgi_pxa_pm_enter,
1065 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1066 index c83fdc8..ab9fc44 100644
1067 --- a/arch/arm/mach-sa1100/pm.c
1068 +++ b/arch/arm/mach-sa1100/pm.c
1069 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1070 return virt_to_phys(sp);
1071 }
1072
1073 -static struct platform_suspend_ops sa11x0_pm_ops = {
1074 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1075 .enter = sa11x0_pm_enter,
1076 .valid = suspend_valid_only_mem,
1077 };
1078 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1079 index 3191cd6..c0739db 100644
1080 --- a/arch/arm/mm/fault.c
1081 +++ b/arch/arm/mm/fault.c
1082 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1083 }
1084 #endif
1085
1086 +#ifdef CONFIG_PAX_PAGEEXEC
1087 + if (fsr & FSR_LNX_PF) {
1088 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1089 + do_group_exit(SIGKILL);
1090 + }
1091 +#endif
1092 +
1093 tsk->thread.address = addr;
1094 tsk->thread.error_code = fsr;
1095 tsk->thread.trap_no = 14;
1096 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1097 }
1098 #endif /* CONFIG_MMU */
1099
1100 +#ifdef CONFIG_PAX_PAGEEXEC
1101 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1102 +{
1103 + long i;
1104 +
1105 + printk(KERN_ERR "PAX: bytes at PC: ");
1106 + for (i = 0; i < 20; i++) {
1107 + unsigned char c;
1108 + if (get_user(c, (__force unsigned char __user *)pc+i))
1109 + printk(KERN_CONT "?? ");
1110 + else
1111 + printk(KERN_CONT "%02x ", c);
1112 + }
1113 + printk("\n");
1114 +
1115 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1116 + for (i = -1; i < 20; i++) {
1117 + unsigned long c;
1118 + if (get_user(c, (__force unsigned long __user *)sp+i))
1119 + printk(KERN_CONT "???????? ");
1120 + else
1121 + printk(KERN_CONT "%08lx ", c);
1122 + }
1123 + printk("\n");
1124 +}
1125 +#endif
1126 +
1127 /*
1128 * First Level Translation Fault Handler
1129 *
1130 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1131 index f5abc51..7ec524c 100644
1132 --- a/arch/arm/mm/mmap.c
1133 +++ b/arch/arm/mm/mmap.c
1134 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1135 if (len > TASK_SIZE)
1136 return -ENOMEM;
1137
1138 +#ifdef CONFIG_PAX_RANDMMAP
1139 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1140 +#endif
1141 +
1142 if (addr) {
1143 if (do_align)
1144 addr = COLOUR_ALIGN(addr, pgoff);
1145 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1146 addr = PAGE_ALIGN(addr);
1147
1148 vma = find_vma(mm, addr);
1149 - if (TASK_SIZE - len >= addr &&
1150 - (!vma || addr + len <= vma->vm_start))
1151 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1152 return addr;
1153 }
1154 if (len > mm->cached_hole_size) {
1155 - start_addr = addr = mm->free_area_cache;
1156 + start_addr = addr = mm->free_area_cache;
1157 } else {
1158 - start_addr = addr = TASK_UNMAPPED_BASE;
1159 - mm->cached_hole_size = 0;
1160 + start_addr = addr = mm->mmap_base;
1161 + mm->cached_hole_size = 0;
1162 }
1163
1164 full_search:
1165 @@ -94,14 +97,14 @@ full_search:
1166 * Start a new search - just in case we missed
1167 * some holes.
1168 */
1169 - if (start_addr != TASK_UNMAPPED_BASE) {
1170 - start_addr = addr = TASK_UNMAPPED_BASE;
1171 + if (start_addr != mm->mmap_base) {
1172 + start_addr = addr = mm->mmap_base;
1173 mm->cached_hole_size = 0;
1174 goto full_search;
1175 }
1176 return -ENOMEM;
1177 }
1178 - if (!vma || addr + len <= vma->vm_start) {
1179 + if (check_heap_stack_gap(vma, addr, len)) {
1180 /*
1181 * Remember the place where we stopped the search:
1182 */
1183 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1184 index 8d97db2..b66cfa5 100644
1185 --- a/arch/arm/plat-s3c/pm.c
1186 +++ b/arch/arm/plat-s3c/pm.c
1187 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1188 s3c_pm_check_cleanup();
1189 }
1190
1191 -static struct platform_suspend_ops s3c_pm_ops = {
1192 +static const struct platform_suspend_ops s3c_pm_ops = {
1193 .enter = s3c_pm_enter,
1194 .prepare = s3c_pm_prepare,
1195 .finish = s3c_pm_finish,
1196 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1197 index d5d1d41..856e2ed 100644
1198 --- a/arch/avr32/include/asm/elf.h
1199 +++ b/arch/avr32/include/asm/elf.h
1200 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1201 the loader. We need to make sure that it is out of the way of the program
1202 that it will "exec", and that there is sufficient room for the brk. */
1203
1204 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1205 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1206
1207 +#ifdef CONFIG_PAX_ASLR
1208 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1209 +
1210 +#define PAX_DELTA_MMAP_LEN 15
1211 +#define PAX_DELTA_STACK_LEN 15
1212 +#endif
1213
1214 /* This yields a mask that user programs can use to figure out what
1215 instruction set this CPU supports. This could be done in user space,
1216 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1217 index b7f5c68..556135c 100644
1218 --- a/arch/avr32/include/asm/kmap_types.h
1219 +++ b/arch/avr32/include/asm/kmap_types.h
1220 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1221 D(11) KM_IRQ1,
1222 D(12) KM_SOFTIRQ0,
1223 D(13) KM_SOFTIRQ1,
1224 -D(14) KM_TYPE_NR
1225 +D(14) KM_CLEARPAGE,
1226 +D(15) KM_TYPE_NR
1227 };
1228
1229 #undef D
1230 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1231 index f021edf..32d680e 100644
1232 --- a/arch/avr32/mach-at32ap/pm.c
1233 +++ b/arch/avr32/mach-at32ap/pm.c
1234 @@ -176,7 +176,7 @@ out:
1235 return 0;
1236 }
1237
1238 -static struct platform_suspend_ops avr32_pm_ops = {
1239 +static const struct platform_suspend_ops avr32_pm_ops = {
1240 .valid = avr32_pm_valid_state,
1241 .enter = avr32_pm_enter,
1242 };
1243 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1244 index b61d86d..e292c7f 100644
1245 --- a/arch/avr32/mm/fault.c
1246 +++ b/arch/avr32/mm/fault.c
1247 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1248
1249 int exception_trace = 1;
1250
1251 +#ifdef CONFIG_PAX_PAGEEXEC
1252 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1253 +{
1254 + unsigned long i;
1255 +
1256 + printk(KERN_ERR "PAX: bytes at PC: ");
1257 + for (i = 0; i < 20; i++) {
1258 + unsigned char c;
1259 + if (get_user(c, (unsigned char *)pc+i))
1260 + printk(KERN_CONT "???????? ");
1261 + else
1262 + printk(KERN_CONT "%02x ", c);
1263 + }
1264 + printk("\n");
1265 +}
1266 +#endif
1267 +
1268 /*
1269 * This routine handles page faults. It determines the address and the
1270 * problem, and then passes it off to one of the appropriate routines.
1271 @@ -157,6 +174,16 @@ bad_area:
1272 up_read(&mm->mmap_sem);
1273
1274 if (user_mode(regs)) {
1275 +
1276 +#ifdef CONFIG_PAX_PAGEEXEC
1277 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1278 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1279 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1280 + do_group_exit(SIGKILL);
1281 + }
1282 + }
1283 +#endif
1284 +
1285 if (exception_trace && printk_ratelimit())
1286 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1287 "sp %08lx ecr %lu\n",
1288 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1289 index cce79d0..c406c85 100644
1290 --- a/arch/blackfin/kernel/kgdb.c
1291 +++ b/arch/blackfin/kernel/kgdb.c
1292 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1293 return -1; /* this means that we do not want to exit from the handler */
1294 }
1295
1296 -struct kgdb_arch arch_kgdb_ops = {
1297 +const struct kgdb_arch arch_kgdb_ops = {
1298 .gdb_bpt_instr = {0xa1},
1299 #ifdef CONFIG_SMP
1300 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1301 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1302 index 8837be4..b2fb413 100644
1303 --- a/arch/blackfin/mach-common/pm.c
1304 +++ b/arch/blackfin/mach-common/pm.c
1305 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1306 return 0;
1307 }
1308
1309 -struct platform_suspend_ops bfin_pm_ops = {
1310 +const struct platform_suspend_ops bfin_pm_ops = {
1311 .enter = bfin_pm_enter,
1312 .valid = bfin_pm_valid,
1313 };
1314 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1315 index 00a57af..c3ef0cd 100644
1316 --- a/arch/frv/include/asm/atomic.h
1317 +++ b/arch/frv/include/asm/atomic.h
1318 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1319 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1320 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1321
1322 +#define atomic64_read_unchecked(v) atomic64_read(v)
1323 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1324 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1325 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1326 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1327 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1328 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1329 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1330 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1331 +
1332 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1333 {
1334 int c, old;
1335 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1336 index f8e16b2..c73ff79 100644
1337 --- a/arch/frv/include/asm/kmap_types.h
1338 +++ b/arch/frv/include/asm/kmap_types.h
1339 @@ -23,6 +23,7 @@ enum km_type {
1340 KM_IRQ1,
1341 KM_SOFTIRQ0,
1342 KM_SOFTIRQ1,
1343 + KM_CLEARPAGE,
1344 KM_TYPE_NR
1345 };
1346
1347 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1348 index 385fd30..6c3d97e 100644
1349 --- a/arch/frv/mm/elf-fdpic.c
1350 +++ b/arch/frv/mm/elf-fdpic.c
1351 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1352 if (addr) {
1353 addr = PAGE_ALIGN(addr);
1354 vma = find_vma(current->mm, addr);
1355 - if (TASK_SIZE - len >= addr &&
1356 - (!vma || addr + len <= vma->vm_start))
1357 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1358 goto success;
1359 }
1360
1361 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1362 for (; vma; vma = vma->vm_next) {
1363 if (addr > limit)
1364 break;
1365 - if (addr + len <= vma->vm_start)
1366 + if (check_heap_stack_gap(vma, addr, len))
1367 goto success;
1368 addr = vma->vm_end;
1369 }
1370 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1371 for (; vma; vma = vma->vm_next) {
1372 if (addr > limit)
1373 break;
1374 - if (addr + len <= vma->vm_start)
1375 + if (check_heap_stack_gap(vma, addr, len))
1376 goto success;
1377 addr = vma->vm_end;
1378 }
1379 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1380 index e4a80d8..11a7ea1 100644
1381 --- a/arch/ia64/hp/common/hwsw_iommu.c
1382 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1383 @@ -17,7 +17,7 @@
1384 #include <linux/swiotlb.h>
1385 #include <asm/machvec.h>
1386
1387 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1388 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1389
1390 /* swiotlb declarations & definitions: */
1391 extern int swiotlb_late_init_with_default_size (size_t size);
1392 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1393 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1394 }
1395
1396 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1397 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1398 {
1399 if (use_swiotlb(dev))
1400 return &swiotlb_dma_ops;
1401 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1402 index 01ae69b..35752fd 100644
1403 --- a/arch/ia64/hp/common/sba_iommu.c
1404 +++ b/arch/ia64/hp/common/sba_iommu.c
1405 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1406 },
1407 };
1408
1409 -extern struct dma_map_ops swiotlb_dma_ops;
1410 +extern const struct dma_map_ops swiotlb_dma_ops;
1411
1412 static int __init
1413 sba_init(void)
1414 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1415
1416 __setup("sbapagesize=",sba_page_override);
1417
1418 -struct dma_map_ops sba_dma_ops = {
1419 +const struct dma_map_ops sba_dma_ops = {
1420 .alloc_coherent = sba_alloc_coherent,
1421 .free_coherent = sba_free_coherent,
1422 .map_page = sba_map_page,
1423 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1424 index c69552b..c7122f4 100644
1425 --- a/arch/ia64/ia32/binfmt_elf32.c
1426 +++ b/arch/ia64/ia32/binfmt_elf32.c
1427 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1428
1429 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1430
1431 +#ifdef CONFIG_PAX_ASLR
1432 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1433 +
1434 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1435 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1436 +#endif
1437 +
1438 /* Ugly but avoids duplication */
1439 #include "../../../fs/binfmt_elf.c"
1440
1441 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1442 index 0f15349..26b3429 100644
1443 --- a/arch/ia64/ia32/ia32priv.h
1444 +++ b/arch/ia64/ia32/ia32priv.h
1445 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1446 #define ELF_DATA ELFDATA2LSB
1447 #define ELF_ARCH EM_386
1448
1449 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1450 +#ifdef CONFIG_PAX_RANDUSTACK
1451 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1452 +#else
1453 +#define __IA32_DELTA_STACK 0UL
1454 +#endif
1455 +
1456 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1457 +
1458 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1459 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1460
1461 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1462 index 88405cb..de5ca5d 100644
1463 --- a/arch/ia64/include/asm/atomic.h
1464 +++ b/arch/ia64/include/asm/atomic.h
1465 @@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1466 #define atomic64_inc(v) atomic64_add(1, (v))
1467 #define atomic64_dec(v) atomic64_sub(1, (v))
1468
1469 +#define atomic64_read_unchecked(v) atomic64_read(v)
1470 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1471 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1472 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1473 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1474 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1475 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1476 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1477 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1478 +
1479 /* Atomic operations are already serializing */
1480 #define smp_mb__before_atomic_dec() barrier()
1481 #define smp_mb__after_atomic_dec() barrier()
1482 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1483 index 8d3c79c..71b3af6 100644
1484 --- a/arch/ia64/include/asm/dma-mapping.h
1485 +++ b/arch/ia64/include/asm/dma-mapping.h
1486 @@ -12,7 +12,7 @@
1487
1488 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1489
1490 -extern struct dma_map_ops *dma_ops;
1491 +extern const struct dma_map_ops *dma_ops;
1492 extern struct ia64_machine_vector ia64_mv;
1493 extern void set_iommu_machvec(void);
1494
1495 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1496 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1497 dma_addr_t *daddr, gfp_t gfp)
1498 {
1499 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1500 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1501 void *caddr;
1502
1503 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1504 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1505 static inline void dma_free_coherent(struct device *dev, size_t size,
1506 void *caddr, dma_addr_t daddr)
1507 {
1508 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1509 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1510 debug_dma_free_coherent(dev, size, caddr, daddr);
1511 ops->free_coherent(dev, size, caddr, daddr);
1512 }
1513 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1514
1515 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1516 {
1517 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1518 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1519 return ops->mapping_error(dev, daddr);
1520 }
1521
1522 static inline int dma_supported(struct device *dev, u64 mask)
1523 {
1524 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1525 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1526 return ops->dma_supported(dev, mask);
1527 }
1528
1529 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1530 index 86eddee..b116bb4 100644
1531 --- a/arch/ia64/include/asm/elf.h
1532 +++ b/arch/ia64/include/asm/elf.h
1533 @@ -43,6 +43,13 @@
1534 */
1535 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1536
1537 +#ifdef CONFIG_PAX_ASLR
1538 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1539 +
1540 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1541 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1542 +#endif
1543 +
1544 #define PT_IA_64_UNWIND 0x70000001
1545
1546 /* IA-64 relocations: */
1547 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1548 index 367d299..9ad4279 100644
1549 --- a/arch/ia64/include/asm/machvec.h
1550 +++ b/arch/ia64/include/asm/machvec.h
1551 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1552 /* DMA-mapping interface: */
1553 typedef void ia64_mv_dma_init (void);
1554 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1555 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1556 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1557
1558 /*
1559 * WARNING: The legacy I/O space is _architected_. Platforms are
1560 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1561 # endif /* CONFIG_IA64_GENERIC */
1562
1563 extern void swiotlb_dma_init(void);
1564 -extern struct dma_map_ops *dma_get_ops(struct device *);
1565 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1566
1567 /*
1568 * Define default versions so we can extend machvec for new platforms without having
1569 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1570 index 8840a69..cdb63d9 100644
1571 --- a/arch/ia64/include/asm/pgtable.h
1572 +++ b/arch/ia64/include/asm/pgtable.h
1573 @@ -12,7 +12,7 @@
1574 * David Mosberger-Tang <davidm@hpl.hp.com>
1575 */
1576
1577 -
1578 +#include <linux/const.h>
1579 #include <asm/mman.h>
1580 #include <asm/page.h>
1581 #include <asm/processor.h>
1582 @@ -143,6 +143,17 @@
1583 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1584 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1585 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1586 +
1587 +#ifdef CONFIG_PAX_PAGEEXEC
1588 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1589 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1590 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1591 +#else
1592 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1593 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1594 +# define PAGE_COPY_NOEXEC PAGE_COPY
1595 +#endif
1596 +
1597 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1598 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1599 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1600 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1601 index 239ecdc..f94170e 100644
1602 --- a/arch/ia64/include/asm/spinlock.h
1603 +++ b/arch/ia64/include/asm/spinlock.h
1604 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1605 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1606
1607 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1608 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1609 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1610 }
1611
1612 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1613 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1614 index 449c8c0..432a3d2 100644
1615 --- a/arch/ia64/include/asm/uaccess.h
1616 +++ b/arch/ia64/include/asm/uaccess.h
1617 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1618 const void *__cu_from = (from); \
1619 long __cu_len = (n); \
1620 \
1621 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1622 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1623 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1624 __cu_len; \
1625 })
1626 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1627 long __cu_len = (n); \
1628 \
1629 __chk_user_ptr(__cu_from); \
1630 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1631 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1632 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1633 __cu_len; \
1634 })
1635 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1636 index f2c1600..969398a 100644
1637 --- a/arch/ia64/kernel/dma-mapping.c
1638 +++ b/arch/ia64/kernel/dma-mapping.c
1639 @@ -3,7 +3,7 @@
1640 /* Set this to 1 if there is a HW IOMMU in the system */
1641 int iommu_detected __read_mostly;
1642
1643 -struct dma_map_ops *dma_ops;
1644 +const struct dma_map_ops *dma_ops;
1645 EXPORT_SYMBOL(dma_ops);
1646
1647 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1648 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1649 }
1650 fs_initcall(dma_init);
1651
1652 -struct dma_map_ops *dma_get_ops(struct device *dev)
1653 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1654 {
1655 return dma_ops;
1656 }
1657 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1658 index 1481b0a..e7d38ff 100644
1659 --- a/arch/ia64/kernel/module.c
1660 +++ b/arch/ia64/kernel/module.c
1661 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1662 void
1663 module_free (struct module *mod, void *module_region)
1664 {
1665 - if (mod && mod->arch.init_unw_table &&
1666 - module_region == mod->module_init) {
1667 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1668 unw_remove_unwind_table(mod->arch.init_unw_table);
1669 mod->arch.init_unw_table = NULL;
1670 }
1671 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1672 }
1673
1674 static inline int
1675 +in_init_rx (const struct module *mod, uint64_t addr)
1676 +{
1677 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1678 +}
1679 +
1680 +static inline int
1681 +in_init_rw (const struct module *mod, uint64_t addr)
1682 +{
1683 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1684 +}
1685 +
1686 +static inline int
1687 in_init (const struct module *mod, uint64_t addr)
1688 {
1689 - return addr - (uint64_t) mod->module_init < mod->init_size;
1690 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1691 +}
1692 +
1693 +static inline int
1694 +in_core_rx (const struct module *mod, uint64_t addr)
1695 +{
1696 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1697 +}
1698 +
1699 +static inline int
1700 +in_core_rw (const struct module *mod, uint64_t addr)
1701 +{
1702 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1703 }
1704
1705 static inline int
1706 in_core (const struct module *mod, uint64_t addr)
1707 {
1708 - return addr - (uint64_t) mod->module_core < mod->core_size;
1709 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1710 }
1711
1712 static inline int
1713 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1714 break;
1715
1716 case RV_BDREL:
1717 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1718 + if (in_init_rx(mod, val))
1719 + val -= (uint64_t) mod->module_init_rx;
1720 + else if (in_init_rw(mod, val))
1721 + val -= (uint64_t) mod->module_init_rw;
1722 + else if (in_core_rx(mod, val))
1723 + val -= (uint64_t) mod->module_core_rx;
1724 + else if (in_core_rw(mod, val))
1725 + val -= (uint64_t) mod->module_core_rw;
1726 break;
1727
1728 case RV_LTV:
1729 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1730 * addresses have been selected...
1731 */
1732 uint64_t gp;
1733 - if (mod->core_size > MAX_LTOFF)
1734 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1735 /*
1736 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1737 * at the end of the module.
1738 */
1739 - gp = mod->core_size - MAX_LTOFF / 2;
1740 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1741 else
1742 - gp = mod->core_size / 2;
1743 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1744 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1745 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1746 mod->arch.gp = gp;
1747 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1748 }
1749 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1750 index f6b1ff0..de773fb 100644
1751 --- a/arch/ia64/kernel/pci-dma.c
1752 +++ b/arch/ia64/kernel/pci-dma.c
1753 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1754 .dma_mask = &fallback_dev.coherent_dma_mask,
1755 };
1756
1757 -extern struct dma_map_ops intel_dma_ops;
1758 +extern const struct dma_map_ops intel_dma_ops;
1759
1760 static int __init pci_iommu_init(void)
1761 {
1762 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1763 }
1764 EXPORT_SYMBOL(iommu_dma_supported);
1765
1766 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1767 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1768 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1769 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1770 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1771 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1772 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1773 +
1774 +static const struct dma_map_ops intel_iommu_dma_ops = {
1775 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1776 + .alloc_coherent = intel_alloc_coherent,
1777 + .free_coherent = intel_free_coherent,
1778 + .map_sg = intel_map_sg,
1779 + .unmap_sg = intel_unmap_sg,
1780 + .map_page = intel_map_page,
1781 + .unmap_page = intel_unmap_page,
1782 + .mapping_error = intel_mapping_error,
1783 +
1784 + .sync_single_for_cpu = machvec_dma_sync_single,
1785 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1786 + .sync_single_for_device = machvec_dma_sync_single,
1787 + .sync_sg_for_device = machvec_dma_sync_sg,
1788 + .dma_supported = iommu_dma_supported,
1789 +};
1790 +
1791 void __init pci_iommu_alloc(void)
1792 {
1793 - dma_ops = &intel_dma_ops;
1794 -
1795 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1796 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1797 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1798 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1799 - dma_ops->dma_supported = iommu_dma_supported;
1800 + dma_ops = &intel_iommu_dma_ops;
1801
1802 /*
1803 * The order of these functions is important for
1804 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1805 index 285aae8..61dbab6 100644
1806 --- a/arch/ia64/kernel/pci-swiotlb.c
1807 +++ b/arch/ia64/kernel/pci-swiotlb.c
1808 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1809 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1810 }
1811
1812 -struct dma_map_ops swiotlb_dma_ops = {
1813 +const struct dma_map_ops swiotlb_dma_ops = {
1814 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1815 .free_coherent = swiotlb_free_coherent,
1816 .map_page = swiotlb_map_page,
1817 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1818 index 609d500..7dde2a8 100644
1819 --- a/arch/ia64/kernel/sys_ia64.c
1820 +++ b/arch/ia64/kernel/sys_ia64.c
1821 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1822 if (REGION_NUMBER(addr) == RGN_HPAGE)
1823 addr = 0;
1824 #endif
1825 +
1826 +#ifdef CONFIG_PAX_RANDMMAP
1827 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1828 + addr = mm->free_area_cache;
1829 + else
1830 +#endif
1831 +
1832 if (!addr)
1833 addr = mm->free_area_cache;
1834
1835 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1836 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1837 /* At this point: (!vma || addr < vma->vm_end). */
1838 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1839 - if (start_addr != TASK_UNMAPPED_BASE) {
1840 + if (start_addr != mm->mmap_base) {
1841 /* Start a new search --- just in case we missed some holes. */
1842 - addr = TASK_UNMAPPED_BASE;
1843 + addr = mm->mmap_base;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848 - if (!vma || addr + len <= vma->vm_start) {
1849 + if (check_heap_stack_gap(vma, addr, len)) {
1850 /* Remember the address where we stopped this search: */
1851 mm->free_area_cache = addr + len;
1852 return addr;
1853 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1854 index 8f06035..b3a5818 100644
1855 --- a/arch/ia64/kernel/topology.c
1856 +++ b/arch/ia64/kernel/topology.c
1857 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1858 return ret;
1859 }
1860
1861 -static struct sysfs_ops cache_sysfs_ops = {
1862 +static const struct sysfs_ops cache_sysfs_ops = {
1863 .show = cache_show
1864 };
1865
1866 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1867 index 0a0c77b..8e55a81 100644
1868 --- a/arch/ia64/kernel/vmlinux.lds.S
1869 +++ b/arch/ia64/kernel/vmlinux.lds.S
1870 @@ -190,7 +190,7 @@ SECTIONS
1871 /* Per-cpu data: */
1872 . = ALIGN(PERCPU_PAGE_SIZE);
1873 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1874 - __phys_per_cpu_start = __per_cpu_load;
1875 + __phys_per_cpu_start = per_cpu_load;
1876 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1877 * into percpu page size
1878 */
1879 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1880 index 19261a9..1611b7a 100644
1881 --- a/arch/ia64/mm/fault.c
1882 +++ b/arch/ia64/mm/fault.c
1883 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1884 return pte_present(pte);
1885 }
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1889 +{
1890 + unsigned long i;
1891 +
1892 + printk(KERN_ERR "PAX: bytes at PC: ");
1893 + for (i = 0; i < 8; i++) {
1894 + unsigned int c;
1895 + if (get_user(c, (unsigned int *)pc+i))
1896 + printk(KERN_CONT "???????? ");
1897 + else
1898 + printk(KERN_CONT "%08x ", c);
1899 + }
1900 + printk("\n");
1901 +}
1902 +#endif
1903 +
1904 void __kprobes
1905 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1906 {
1907 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1908 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1909 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1910
1911 - if ((vma->vm_flags & mask) != mask)
1912 + if ((vma->vm_flags & mask) != mask) {
1913 +
1914 +#ifdef CONFIG_PAX_PAGEEXEC
1915 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1916 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1917 + goto bad_area;
1918 +
1919 + up_read(&mm->mmap_sem);
1920 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1921 + do_group_exit(SIGKILL);
1922 + }
1923 +#endif
1924 +
1925 goto bad_area;
1926
1927 + }
1928 +
1929 survive:
1930 /*
1931 * If for any reason at all we couldn't handle the fault, make
1932 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1933 index b0f6157..a082bbc 100644
1934 --- a/arch/ia64/mm/hugetlbpage.c
1935 +++ b/arch/ia64/mm/hugetlbpage.c
1936 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1937 /* At this point: (!vmm || addr < vmm->vm_end). */
1938 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1939 return -ENOMEM;
1940 - if (!vmm || (addr + len) <= vmm->vm_start)
1941 + if (check_heap_stack_gap(vmm, addr, len))
1942 return addr;
1943 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1944 }
1945 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1946 index 1857766..05cc6a3 100644
1947 --- a/arch/ia64/mm/init.c
1948 +++ b/arch/ia64/mm/init.c
1949 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1950 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1951 vma->vm_end = vma->vm_start + PAGE_SIZE;
1952 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1953 +
1954 +#ifdef CONFIG_PAX_PAGEEXEC
1955 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1956 + vma->vm_flags &= ~VM_EXEC;
1957 +
1958 +#ifdef CONFIG_PAX_MPROTECT
1959 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1960 + vma->vm_flags &= ~VM_MAYEXEC;
1961 +#endif
1962 +
1963 + }
1964 +#endif
1965 +
1966 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1967 down_write(&current->mm->mmap_sem);
1968 if (insert_vm_struct(current->mm, vma)) {
1969 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1970 index 98b6849..8046766 100644
1971 --- a/arch/ia64/sn/pci/pci_dma.c
1972 +++ b/arch/ia64/sn/pci/pci_dma.c
1973 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1974 return ret;
1975 }
1976
1977 -static struct dma_map_ops sn_dma_ops = {
1978 +static const struct dma_map_ops sn_dma_ops = {
1979 .alloc_coherent = sn_dma_alloc_coherent,
1980 .free_coherent = sn_dma_free_coherent,
1981 .map_page = sn_dma_map_page,
1982 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1983 index 82abd15..d95ae5d 100644
1984 --- a/arch/m32r/lib/usercopy.c
1985 +++ b/arch/m32r/lib/usercopy.c
1986 @@ -14,6 +14,9 @@
1987 unsigned long
1988 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1989 {
1990 + if ((long)n < 0)
1991 + return n;
1992 +
1993 prefetch(from);
1994 if (access_ok(VERIFY_WRITE, to, n))
1995 __copy_user(to,from,n);
1996 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1997 unsigned long
1998 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1999 {
2000 + if ((long)n < 0)
2001 + return n;
2002 +
2003 prefetchw(to);
2004 if (access_ok(VERIFY_READ, from, n))
2005 __copy_user_zeroing(to,from,n);
2006 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2007 index fd7620f..63d73a6 100644
2008 --- a/arch/mips/Kconfig
2009 +++ b/arch/mips/Kconfig
2010 @@ -5,6 +5,7 @@ config MIPS
2011 select HAVE_IDE
2012 select HAVE_OPROFILE
2013 select HAVE_ARCH_KGDB
2014 + select GENERIC_ATOMIC64 if !64BIT
2015 # Horrible source of confusion. Die, die, die ...
2016 select EMBEDDED
2017 select RTC_LIB if !LEMOTE_FULOONG2E
2018 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2019 index 77f5021..2b1db8a 100644
2020 --- a/arch/mips/Makefile
2021 +++ b/arch/mips/Makefile
2022 @@ -51,6 +51,8 @@ endif
2023 cflags-y := -ffunction-sections
2024 cflags-y += $(call cc-option, -mno-check-zero-division)
2025
2026 +cflags-y += -Wno-sign-compare -Wno-extra
2027 +
2028 ifdef CONFIG_32BIT
2029 ld-emul = $(32bit-emul)
2030 vmlinux-32 = vmlinux
2031 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2032 index 632f986..fd0378d 100644
2033 --- a/arch/mips/alchemy/devboards/pm.c
2034 +++ b/arch/mips/alchemy/devboards/pm.c
2035 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2036
2037 }
2038
2039 -static struct platform_suspend_ops db1x_pm_ops = {
2040 +static const struct platform_suspend_ops db1x_pm_ops = {
2041 .valid = suspend_valid_only_mem,
2042 .begin = db1x_pm_begin,
2043 .enter = db1x_pm_enter,
2044 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2045 index 09e7128..111035b 100644
2046 --- a/arch/mips/include/asm/atomic.h
2047 +++ b/arch/mips/include/asm/atomic.h
2048 @@ -21,6 +21,10 @@
2049 #include <asm/war.h>
2050 #include <asm/system.h>
2051
2052 +#ifdef CONFIG_GENERIC_ATOMIC64
2053 +#include <asm-generic/atomic64.h>
2054 +#endif
2055 +
2056 #define ATOMIC_INIT(i) { (i) }
2057
2058 /*
2059 @@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2060 */
2061 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2062
2063 +#define atomic64_read_unchecked(v) atomic64_read(v)
2064 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2065 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2066 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2067 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2068 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2069 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2070 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2071 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2072 +
2073 #endif /* CONFIG_64BIT */
2074
2075 /*
2076 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2077 index 7990694..4e93acf 100644
2078 --- a/arch/mips/include/asm/elf.h
2079 +++ b/arch/mips/include/asm/elf.h
2080 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2081 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2082 #endif
2083
2084 +#ifdef CONFIG_PAX_ASLR
2085 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2086 +
2087 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2088 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2089 +#endif
2090 +
2091 #endif /* _ASM_ELF_H */
2092 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2093 index f266295..627cfff 100644
2094 --- a/arch/mips/include/asm/page.h
2095 +++ b/arch/mips/include/asm/page.h
2096 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2097 #ifdef CONFIG_CPU_MIPS32
2098 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2099 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2100 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2101 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2102 #else
2103 typedef struct { unsigned long long pte; } pte_t;
2104 #define pte_val(x) ((x).pte)
2105 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2106 index e48c0bf..f3acf65 100644
2107 --- a/arch/mips/include/asm/reboot.h
2108 +++ b/arch/mips/include/asm/reboot.h
2109 @@ -9,7 +9,7 @@
2110 #ifndef _ASM_REBOOT_H
2111 #define _ASM_REBOOT_H
2112
2113 -extern void (*_machine_restart)(char *command);
2114 -extern void (*_machine_halt)(void);
2115 +extern void (*__noreturn _machine_restart)(char *command);
2116 +extern void (*__noreturn _machine_halt)(void);
2117
2118 #endif /* _ASM_REBOOT_H */
2119 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2120 index 83b5509..9fa24a23 100644
2121 --- a/arch/mips/include/asm/system.h
2122 +++ b/arch/mips/include/asm/system.h
2123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2124 */
2125 #define __ARCH_WANT_UNLOCKED_CTXSW
2126
2127 -extern unsigned long arch_align_stack(unsigned long sp);
2128 +#define arch_align_stack(x) ((x) & ~0xfUL)
2129
2130 #endif /* _ASM_SYSTEM_H */
2131 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2132 index 9fdd8bc..fcf9d68 100644
2133 --- a/arch/mips/kernel/binfmt_elfn32.c
2134 +++ b/arch/mips/kernel/binfmt_elfn32.c
2135 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2136 #undef ELF_ET_DYN_BASE
2137 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2138
2139 +#ifdef CONFIG_PAX_ASLR
2140 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2141 +
2142 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2143 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2144 +#endif
2145 +
2146 #include <asm/processor.h>
2147 #include <linux/module.h>
2148 #include <linux/elfcore.h>
2149 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2150 index ff44823..cf0b48a 100644
2151 --- a/arch/mips/kernel/binfmt_elfo32.c
2152 +++ b/arch/mips/kernel/binfmt_elfo32.c
2153 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2154 #undef ELF_ET_DYN_BASE
2155 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2156
2157 +#ifdef CONFIG_PAX_ASLR
2158 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2159 +
2160 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2161 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2162 +#endif
2163 +
2164 #include <asm/processor.h>
2165
2166 /*
2167 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2168 index 50c9bb8..efdd5f8 100644
2169 --- a/arch/mips/kernel/kgdb.c
2170 +++ b/arch/mips/kernel/kgdb.c
2171 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2172 return -1;
2173 }
2174
2175 +/* cannot be const */
2176 struct kgdb_arch arch_kgdb_ops;
2177
2178 /*
2179 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2180 index f3d73e1..bb3f57a 100644
2181 --- a/arch/mips/kernel/process.c
2182 +++ b/arch/mips/kernel/process.c
2183 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2184 out:
2185 return pc;
2186 }
2187 -
2188 -/*
2189 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2190 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2191 - */
2192 -unsigned long arch_align_stack(unsigned long sp)
2193 -{
2194 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2195 - sp -= get_random_int() & ~PAGE_MASK;
2196 -
2197 - return sp & ALMASK;
2198 -}
2199 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2200 index 060563a..7fbf310 100644
2201 --- a/arch/mips/kernel/reset.c
2202 +++ b/arch/mips/kernel/reset.c
2203 @@ -19,8 +19,8 @@
2204 * So handle all using function pointers to machine specific
2205 * functions.
2206 */
2207 -void (*_machine_restart)(char *command);
2208 -void (*_machine_halt)(void);
2209 +void (*__noreturn _machine_restart)(char *command);
2210 +void (*__noreturn _machine_halt)(void);
2211 void (*pm_power_off)(void);
2212
2213 EXPORT_SYMBOL(pm_power_off);
2214 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2215 {
2216 if (_machine_restart)
2217 _machine_restart(command);
2218 + BUG();
2219 }
2220
2221 void machine_halt(void)
2222 {
2223 if (_machine_halt)
2224 _machine_halt();
2225 + BUG();
2226 }
2227
2228 void machine_power_off(void)
2229 {
2230 if (pm_power_off)
2231 pm_power_off();
2232 + BUG();
2233 }
2234 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2235 index 3f7f466..3abe0b5 100644
2236 --- a/arch/mips/kernel/syscall.c
2237 +++ b/arch/mips/kernel/syscall.c
2238 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2239 do_color_align = 0;
2240 if (filp || (flags & MAP_SHARED))
2241 do_color_align = 1;
2242 +
2243 +#ifdef CONFIG_PAX_RANDMMAP
2244 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2245 +#endif
2246 +
2247 if (addr) {
2248 if (do_color_align)
2249 addr = COLOUR_ALIGN(addr, pgoff);
2250 else
2251 addr = PAGE_ALIGN(addr);
2252 vmm = find_vma(current->mm, addr);
2253 - if (task_size - len >= addr &&
2254 - (!vmm || addr + len <= vmm->vm_start))
2255 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2256 return addr;
2257 }
2258 - addr = TASK_UNMAPPED_BASE;
2259 + addr = current->mm->mmap_base;
2260 if (do_color_align)
2261 addr = COLOUR_ALIGN(addr, pgoff);
2262 else
2263 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2264 /* At this point: (!vmm || addr < vmm->vm_end). */
2265 if (task_size - len < addr)
2266 return -ENOMEM;
2267 - if (!vmm || addr + len <= vmm->vm_start)
2268 + if (check_heap_stack_gap(vmm, addr, len))
2269 return addr;
2270 addr = vmm->vm_end;
2271 if (do_color_align)
2272 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2273 index e97a7a2..f18f5b0 100644
2274 --- a/arch/mips/mm/fault.c
2275 +++ b/arch/mips/mm/fault.c
2276 @@ -26,6 +26,23 @@
2277 #include <asm/ptrace.h>
2278 #include <asm/highmem.h> /* For VMALLOC_END */
2279
2280 +#ifdef CONFIG_PAX_PAGEEXEC
2281 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2282 +{
2283 + unsigned long i;
2284 +
2285 + printk(KERN_ERR "PAX: bytes at PC: ");
2286 + for (i = 0; i < 5; i++) {
2287 + unsigned int c;
2288 + if (get_user(c, (unsigned int *)pc+i))
2289 + printk(KERN_CONT "???????? ");
2290 + else
2291 + printk(KERN_CONT "%08x ", c);
2292 + }
2293 + printk("\n");
2294 +}
2295 +#endif
2296 +
2297 /*
2298 * This routine handles page faults. It determines the address,
2299 * and the problem, and then passes it off to one of the appropriate
2300 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2301 index 8bc9e96..26554f8 100644
2302 --- a/arch/parisc/include/asm/atomic.h
2303 +++ b/arch/parisc/include/asm/atomic.h
2304 @@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2305
2306 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2307
2308 +#define atomic64_read_unchecked(v) atomic64_read(v)
2309 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2310 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2311 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2312 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2313 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2314 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2315 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2316 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2317 +
2318 #else /* CONFIG_64BIT */
2319
2320 #include <asm-generic/atomic64.h>
2321 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2322 index 9c802eb..0592e41 100644
2323 --- a/arch/parisc/include/asm/elf.h
2324 +++ b/arch/parisc/include/asm/elf.h
2325 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2326
2327 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2328
2329 +#ifdef CONFIG_PAX_ASLR
2330 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2331 +
2332 +#define PAX_DELTA_MMAP_LEN 16
2333 +#define PAX_DELTA_STACK_LEN 16
2334 +#endif
2335 +
2336 /* This yields a mask that user programs can use to figure out what
2337 instruction set this CPU supports. This could be done in user space,
2338 but it's not easy, and we've already done it here. */
2339 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2340 index a27d2e2..18fd845 100644
2341 --- a/arch/parisc/include/asm/pgtable.h
2342 +++ b/arch/parisc/include/asm/pgtable.h
2343 @@ -207,6 +207,17 @@
2344 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2345 #define PAGE_COPY PAGE_EXECREAD
2346 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2347 +
2348 +#ifdef CONFIG_PAX_PAGEEXEC
2349 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2350 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2351 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2352 +#else
2353 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2354 +# define PAGE_COPY_NOEXEC PAGE_COPY
2355 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2356 +#endif
2357 +
2358 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2359 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2360 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2361 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2362 index 2120746..8d70a5e 100644
2363 --- a/arch/parisc/kernel/module.c
2364 +++ b/arch/parisc/kernel/module.c
2365 @@ -95,16 +95,38 @@
2366
2367 /* three functions to determine where in the module core
2368 * or init pieces the location is */
2369 +static inline int in_init_rx(struct module *me, void *loc)
2370 +{
2371 + return (loc >= me->module_init_rx &&
2372 + loc < (me->module_init_rx + me->init_size_rx));
2373 +}
2374 +
2375 +static inline int in_init_rw(struct module *me, void *loc)
2376 +{
2377 + return (loc >= me->module_init_rw &&
2378 + loc < (me->module_init_rw + me->init_size_rw));
2379 +}
2380 +
2381 static inline int in_init(struct module *me, void *loc)
2382 {
2383 - return (loc >= me->module_init &&
2384 - loc <= (me->module_init + me->init_size));
2385 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2386 +}
2387 +
2388 +static inline int in_core_rx(struct module *me, void *loc)
2389 +{
2390 + return (loc >= me->module_core_rx &&
2391 + loc < (me->module_core_rx + me->core_size_rx));
2392 +}
2393 +
2394 +static inline int in_core_rw(struct module *me, void *loc)
2395 +{
2396 + return (loc >= me->module_core_rw &&
2397 + loc < (me->module_core_rw + me->core_size_rw));
2398 }
2399
2400 static inline int in_core(struct module *me, void *loc)
2401 {
2402 - return (loc >= me->module_core &&
2403 - loc <= (me->module_core + me->core_size));
2404 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2405 }
2406
2407 static inline int in_local(struct module *me, void *loc)
2408 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2409 }
2410
2411 /* align things a bit */
2412 - me->core_size = ALIGN(me->core_size, 16);
2413 - me->arch.got_offset = me->core_size;
2414 - me->core_size += gots * sizeof(struct got_entry);
2415 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2416 + me->arch.got_offset = me->core_size_rw;
2417 + me->core_size_rw += gots * sizeof(struct got_entry);
2418
2419 - me->core_size = ALIGN(me->core_size, 16);
2420 - me->arch.fdesc_offset = me->core_size;
2421 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2422 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2423 + me->arch.fdesc_offset = me->core_size_rw;
2424 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2425
2426 me->arch.got_max = gots;
2427 me->arch.fdesc_max = fdescs;
2428 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2429
2430 BUG_ON(value == 0);
2431
2432 - got = me->module_core + me->arch.got_offset;
2433 + got = me->module_core_rw + me->arch.got_offset;
2434 for (i = 0; got[i].addr; i++)
2435 if (got[i].addr == value)
2436 goto out;
2437 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2438 #ifdef CONFIG_64BIT
2439 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2440 {
2441 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2442 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2443
2444 if (!value) {
2445 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2446 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2447
2448 /* Create new one */
2449 fdesc->addr = value;
2450 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2451 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2452 return (Elf_Addr)fdesc;
2453 }
2454 #endif /* CONFIG_64BIT */
2455 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2456
2457 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2458 end = table + sechdrs[me->arch.unwind_section].sh_size;
2459 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2460 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2461
2462 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2463 me->arch.unwind_section, table, end, gp);
2464 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2465 index 9147391..f3d949a 100644
2466 --- a/arch/parisc/kernel/sys_parisc.c
2467 +++ b/arch/parisc/kernel/sys_parisc.c
2468 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2469 /* At this point: (!vma || addr < vma->vm_end). */
2470 if (TASK_SIZE - len < addr)
2471 return -ENOMEM;
2472 - if (!vma || addr + len <= vma->vm_start)
2473 + if (check_heap_stack_gap(vma, addr, len))
2474 return addr;
2475 addr = vma->vm_end;
2476 }
2477 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2478 /* At this point: (!vma || addr < vma->vm_end). */
2479 if (TASK_SIZE - len < addr)
2480 return -ENOMEM;
2481 - if (!vma || addr + len <= vma->vm_start)
2482 + if (check_heap_stack_gap(vma, addr, len))
2483 return addr;
2484 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2485 if (addr < vma->vm_end) /* handle wraparound */
2486 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2487 if (flags & MAP_FIXED)
2488 return addr;
2489 if (!addr)
2490 - addr = TASK_UNMAPPED_BASE;
2491 + addr = current->mm->mmap_base;
2492
2493 if (filp) {
2494 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2495 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2496 index 8b58bf0..7afff03 100644
2497 --- a/arch/parisc/kernel/traps.c
2498 +++ b/arch/parisc/kernel/traps.c
2499 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2500
2501 down_read(&current->mm->mmap_sem);
2502 vma = find_vma(current->mm,regs->iaoq[0]);
2503 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2504 - && (vma->vm_flags & VM_EXEC)) {
2505 -
2506 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2507 fault_address = regs->iaoq[0];
2508 fault_space = regs->iasq[0];
2509
2510 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2511 index c6afbfc..c5839f6 100644
2512 --- a/arch/parisc/mm/fault.c
2513 +++ b/arch/parisc/mm/fault.c
2514 @@ -15,6 +15,7 @@
2515 #include <linux/sched.h>
2516 #include <linux/interrupt.h>
2517 #include <linux/module.h>
2518 +#include <linux/unistd.h>
2519
2520 #include <asm/uaccess.h>
2521 #include <asm/traps.h>
2522 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2523 static unsigned long
2524 parisc_acctyp(unsigned long code, unsigned int inst)
2525 {
2526 - if (code == 6 || code == 16)
2527 + if (code == 6 || code == 7 || code == 16)
2528 return VM_EXEC;
2529
2530 switch (inst & 0xf0000000) {
2531 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2532 }
2533 #endif
2534
2535 +#ifdef CONFIG_PAX_PAGEEXEC
2536 +/*
2537 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2538 + *
2539 + * returns 1 when task should be killed
2540 + * 2 when rt_sigreturn trampoline was detected
2541 + * 3 when unpatched PLT trampoline was detected
2542 + */
2543 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2544 +{
2545 +
2546 +#ifdef CONFIG_PAX_EMUPLT
2547 + int err;
2548 +
2549 + do { /* PaX: unpatched PLT emulation */
2550 + unsigned int bl, depwi;
2551 +
2552 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2553 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2554 +
2555 + if (err)
2556 + break;
2557 +
2558 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2559 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2560 +
2561 + err = get_user(ldw, (unsigned int *)addr);
2562 + err |= get_user(bv, (unsigned int *)(addr+4));
2563 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2564 +
2565 + if (err)
2566 + break;
2567 +
2568 + if (ldw == 0x0E801096U &&
2569 + bv == 0xEAC0C000U &&
2570 + ldw2 == 0x0E881095U)
2571 + {
2572 + unsigned int resolver, map;
2573 +
2574 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2575 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2576 + if (err)
2577 + break;
2578 +
2579 + regs->gr[20] = instruction_pointer(regs)+8;
2580 + regs->gr[21] = map;
2581 + regs->gr[22] = resolver;
2582 + regs->iaoq[0] = resolver | 3UL;
2583 + regs->iaoq[1] = regs->iaoq[0] + 4;
2584 + return 3;
2585 + }
2586 + }
2587 + } while (0);
2588 +#endif
2589 +
2590 +#ifdef CONFIG_PAX_EMUTRAMP
2591 +
2592 +#ifndef CONFIG_PAX_EMUSIGRT
2593 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2594 + return 1;
2595 +#endif
2596 +
2597 + do { /* PaX: rt_sigreturn emulation */
2598 + unsigned int ldi1, ldi2, bel, nop;
2599 +
2600 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2601 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2602 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2603 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2604 +
2605 + if (err)
2606 + break;
2607 +
2608 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2609 + ldi2 == 0x3414015AU &&
2610 + bel == 0xE4008200U &&
2611 + nop == 0x08000240U)
2612 + {
2613 + regs->gr[25] = (ldi1 & 2) >> 1;
2614 + regs->gr[20] = __NR_rt_sigreturn;
2615 + regs->gr[31] = regs->iaoq[1] + 16;
2616 + regs->sr[0] = regs->iasq[1];
2617 + regs->iaoq[0] = 0x100UL;
2618 + regs->iaoq[1] = regs->iaoq[0] + 4;
2619 + regs->iasq[0] = regs->sr[2];
2620 + regs->iasq[1] = regs->sr[2];
2621 + return 2;
2622 + }
2623 + } while (0);
2624 +#endif
2625 +
2626 + return 1;
2627 +}
2628 +
2629 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2630 +{
2631 + unsigned long i;
2632 +
2633 + printk(KERN_ERR "PAX: bytes at PC: ");
2634 + for (i = 0; i < 5; i++) {
2635 + unsigned int c;
2636 + if (get_user(c, (unsigned int *)pc+i))
2637 + printk(KERN_CONT "???????? ");
2638 + else
2639 + printk(KERN_CONT "%08x ", c);
2640 + }
2641 + printk("\n");
2642 +}
2643 +#endif
2644 +
2645 int fixup_exception(struct pt_regs *regs)
2646 {
2647 const struct exception_table_entry *fix;
2648 @@ -192,8 +303,33 @@ good_area:
2649
2650 acc_type = parisc_acctyp(code,regs->iir);
2651
2652 - if ((vma->vm_flags & acc_type) != acc_type)
2653 + if ((vma->vm_flags & acc_type) != acc_type) {
2654 +
2655 +#ifdef CONFIG_PAX_PAGEEXEC
2656 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2657 + (address & ~3UL) == instruction_pointer(regs))
2658 + {
2659 + up_read(&mm->mmap_sem);
2660 + switch (pax_handle_fetch_fault(regs)) {
2661 +
2662 +#ifdef CONFIG_PAX_EMUPLT
2663 + case 3:
2664 + return;
2665 +#endif
2666 +
2667 +#ifdef CONFIG_PAX_EMUTRAMP
2668 + case 2:
2669 + return;
2670 +#endif
2671 +
2672 + }
2673 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2674 + do_group_exit(SIGKILL);
2675 + }
2676 +#endif
2677 +
2678 goto bad_area;
2679 + }
2680
2681 /*
2682 * If for any reason at all we couldn't handle the fault, make
2683 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2684 index c107b74..409dc0f 100644
2685 --- a/arch/powerpc/Makefile
2686 +++ b/arch/powerpc/Makefile
2687 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2688 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2689 CPP = $(CC) -E $(KBUILD_CFLAGS)
2690
2691 +cflags-y += -Wno-sign-compare -Wno-extra
2692 +
2693 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2694
2695 ifeq ($(CONFIG_PPC64),y)
2696 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2697 index 6d94d27..50d4cad 100644
2698 --- a/arch/powerpc/include/asm/device.h
2699 +++ b/arch/powerpc/include/asm/device.h
2700 @@ -14,7 +14,7 @@ struct dev_archdata {
2701 struct device_node *of_node;
2702
2703 /* DMA operations on that device */
2704 - struct dma_map_ops *dma_ops;
2705 + const struct dma_map_ops *dma_ops;
2706
2707 /*
2708 * When an iommu is in use, dma_data is used as a ptr to the base of the
2709 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2710 index e281dae..2b8a784 100644
2711 --- a/arch/powerpc/include/asm/dma-mapping.h
2712 +++ b/arch/powerpc/include/asm/dma-mapping.h
2713 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2714 #ifdef CONFIG_PPC64
2715 extern struct dma_map_ops dma_iommu_ops;
2716 #endif
2717 -extern struct dma_map_ops dma_direct_ops;
2718 +extern const struct dma_map_ops dma_direct_ops;
2719
2720 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2721 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2722 {
2723 /* We don't handle the NULL dev case for ISA for now. We could
2724 * do it via an out of line call but it is not needed for now. The
2725 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2726 return dev->archdata.dma_ops;
2727 }
2728
2729 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2730 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2731 {
2732 dev->archdata.dma_ops = ops;
2733 }
2734 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2735
2736 static inline int dma_supported(struct device *dev, u64 mask)
2737 {
2738 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2739 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2740
2741 if (unlikely(dma_ops == NULL))
2742 return 0;
2743 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2744
2745 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2746 {
2747 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2748 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2749
2750 if (unlikely(dma_ops == NULL))
2751 return -EIO;
2752 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2753 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2754 dma_addr_t *dma_handle, gfp_t flag)
2755 {
2756 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2757 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2758 void *cpu_addr;
2759
2760 BUG_ON(!dma_ops);
2761 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2762 static inline void dma_free_coherent(struct device *dev, size_t size,
2763 void *cpu_addr, dma_addr_t dma_handle)
2764 {
2765 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2766 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2767
2768 BUG_ON(!dma_ops);
2769
2770 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2771
2772 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2773 {
2774 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2775 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2776
2777 if (dma_ops->mapping_error)
2778 return dma_ops->mapping_error(dev, dma_addr);
2779 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2780 index 5698502..5db093c 100644
2781 --- a/arch/powerpc/include/asm/elf.h
2782 +++ b/arch/powerpc/include/asm/elf.h
2783 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2784 the loader. We need to make sure that it is out of the way of the program
2785 that it will "exec", and that there is sufficient room for the brk. */
2786
2787 -extern unsigned long randomize_et_dyn(unsigned long base);
2788 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2789 +#define ELF_ET_DYN_BASE (0x20000000)
2790 +
2791 +#ifdef CONFIG_PAX_ASLR
2792 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2793 +
2794 +#ifdef __powerpc64__
2795 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2796 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2797 +#else
2798 +#define PAX_DELTA_MMAP_LEN 15
2799 +#define PAX_DELTA_STACK_LEN 15
2800 +#endif
2801 +#endif
2802
2803 /*
2804 * Our registers are always unsigned longs, whether we're a 32 bit
2805 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2806 (0x7ff >> (PAGE_SHIFT - 12)) : \
2807 (0x3ffff >> (PAGE_SHIFT - 12)))
2808
2809 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2810 -#define arch_randomize_brk arch_randomize_brk
2811 -
2812 #endif /* __KERNEL__ */
2813
2814 /*
2815 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2816 index edfc980..1766f59 100644
2817 --- a/arch/powerpc/include/asm/iommu.h
2818 +++ b/arch/powerpc/include/asm/iommu.h
2819 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2820 extern void iommu_init_early_dart(void);
2821 extern void iommu_init_early_pasemi(void);
2822
2823 +/* dma-iommu.c */
2824 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2825 +
2826 #ifdef CONFIG_PCI
2827 extern void pci_iommu_init(void);
2828 extern void pci_direct_iommu_init(void);
2829 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2830 index 9163695..5a00112 100644
2831 --- a/arch/powerpc/include/asm/kmap_types.h
2832 +++ b/arch/powerpc/include/asm/kmap_types.h
2833 @@ -26,6 +26,7 @@ enum km_type {
2834 KM_SOFTIRQ1,
2835 KM_PPC_SYNC_PAGE,
2836 KM_PPC_SYNC_ICACHE,
2837 + KM_CLEARPAGE,
2838 KM_TYPE_NR
2839 };
2840
2841 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2842 index ff24254..fe45b21 100644
2843 --- a/arch/powerpc/include/asm/page.h
2844 +++ b/arch/powerpc/include/asm/page.h
2845 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2846 * and needs to be executable. This means the whole heap ends
2847 * up being executable.
2848 */
2849 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2850 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2851 +#define VM_DATA_DEFAULT_FLAGS32 \
2852 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2853 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2854
2855 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2856 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2857 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2858 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2859 #endif
2860
2861 +#define ktla_ktva(addr) (addr)
2862 +#define ktva_ktla(addr) (addr)
2863 +
2864 #ifndef __ASSEMBLY__
2865
2866 #undef STRICT_MM_TYPECHECKS
2867 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2868 index 3f17b83..1f9e766 100644
2869 --- a/arch/powerpc/include/asm/page_64.h
2870 +++ b/arch/powerpc/include/asm/page_64.h
2871 @@ -180,15 +180,18 @@ do { \
2872 * stack by default, so in the absense of a PT_GNU_STACK program header
2873 * we turn execute permission off.
2874 */
2875 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2876 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2877 +#define VM_STACK_DEFAULT_FLAGS32 \
2878 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2879 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2880
2881 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2882 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2883
2884 +#ifndef CONFIG_PAX_PAGEEXEC
2885 #define VM_STACK_DEFAULT_FLAGS \
2886 (test_thread_flag(TIF_32BIT) ? \
2887 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2888 +#endif
2889
2890 #include <asm-generic/getorder.h>
2891
2892 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2893 index b5ea626..40308222 100644
2894 --- a/arch/powerpc/include/asm/pci.h
2895 +++ b/arch/powerpc/include/asm/pci.h
2896 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2897 }
2898
2899 #ifdef CONFIG_PCI
2900 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2901 -extern struct dma_map_ops *get_pci_dma_ops(void);
2902 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2903 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2904 #else /* CONFIG_PCI */
2905 #define set_pci_dma_ops(d)
2906 #define get_pci_dma_ops() NULL
2907 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2908 index 2a5da06..d65bea2 100644
2909 --- a/arch/powerpc/include/asm/pgtable.h
2910 +++ b/arch/powerpc/include/asm/pgtable.h
2911 @@ -2,6 +2,7 @@
2912 #define _ASM_POWERPC_PGTABLE_H
2913 #ifdef __KERNEL__
2914
2915 +#include <linux/const.h>
2916 #ifndef __ASSEMBLY__
2917 #include <asm/processor.h> /* For TASK_SIZE */
2918 #include <asm/mmu.h>
2919 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2920 index 4aad413..85d86bf 100644
2921 --- a/arch/powerpc/include/asm/pte-hash32.h
2922 +++ b/arch/powerpc/include/asm/pte-hash32.h
2923 @@ -21,6 +21,7 @@
2924 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2925 #define _PAGE_USER 0x004 /* usermode access allowed */
2926 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2927 +#define _PAGE_EXEC _PAGE_GUARDED
2928 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2929 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2930 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2931 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2932 index 8c34149..78f425a 100644
2933 --- a/arch/powerpc/include/asm/ptrace.h
2934 +++ b/arch/powerpc/include/asm/ptrace.h
2935 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2936 } while(0)
2937
2938 struct task_struct;
2939 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2940 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2941 extern int ptrace_put_reg(struct task_struct *task, int regno,
2942 unsigned long data);
2943
2944 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2945 index 32a7c30..be3a8bb 100644
2946 --- a/arch/powerpc/include/asm/reg.h
2947 +++ b/arch/powerpc/include/asm/reg.h
2948 @@ -191,6 +191,7 @@
2949 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2950 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2951 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2952 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2953 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2954 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2955 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2956 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2957 index 8979d4c..d2fd0d3 100644
2958 --- a/arch/powerpc/include/asm/swiotlb.h
2959 +++ b/arch/powerpc/include/asm/swiotlb.h
2960 @@ -13,7 +13,7 @@
2961
2962 #include <linux/swiotlb.h>
2963
2964 -extern struct dma_map_ops swiotlb_dma_ops;
2965 +extern const struct dma_map_ops swiotlb_dma_ops;
2966
2967 static inline void dma_mark_clean(void *addr, size_t size) {}
2968
2969 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2970 index 094a12a..877a60a 100644
2971 --- a/arch/powerpc/include/asm/system.h
2972 +++ b/arch/powerpc/include/asm/system.h
2973 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2974 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2975 #endif
2976
2977 -extern unsigned long arch_align_stack(unsigned long sp);
2978 +#define arch_align_stack(x) ((x) & ~0xfUL)
2979
2980 /* Used in very early kernel initialization. */
2981 extern unsigned long reloc_offset(void);
2982 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2983 index bd0fb84..a42a14b 100644
2984 --- a/arch/powerpc/include/asm/uaccess.h
2985 +++ b/arch/powerpc/include/asm/uaccess.h
2986 @@ -13,6 +13,8 @@
2987 #define VERIFY_READ 0
2988 #define VERIFY_WRITE 1
2989
2990 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2991 +
2992 /*
2993 * The fs value determines whether argument validity checking should be
2994 * performed or not. If get_fs() == USER_DS, checking is performed, with
2995 @@ -327,52 +329,6 @@ do { \
2996 extern unsigned long __copy_tofrom_user(void __user *to,
2997 const void __user *from, unsigned long size);
2998
2999 -#ifndef __powerpc64__
3000 -
3001 -static inline unsigned long copy_from_user(void *to,
3002 - const void __user *from, unsigned long n)
3003 -{
3004 - unsigned long over;
3005 -
3006 - if (access_ok(VERIFY_READ, from, n))
3007 - return __copy_tofrom_user((__force void __user *)to, from, n);
3008 - if ((unsigned long)from < TASK_SIZE) {
3009 - over = (unsigned long)from + n - TASK_SIZE;
3010 - return __copy_tofrom_user((__force void __user *)to, from,
3011 - n - over) + over;
3012 - }
3013 - return n;
3014 -}
3015 -
3016 -static inline unsigned long copy_to_user(void __user *to,
3017 - const void *from, unsigned long n)
3018 -{
3019 - unsigned long over;
3020 -
3021 - if (access_ok(VERIFY_WRITE, to, n))
3022 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3023 - if ((unsigned long)to < TASK_SIZE) {
3024 - over = (unsigned long)to + n - TASK_SIZE;
3025 - return __copy_tofrom_user(to, (__force void __user *)from,
3026 - n - over) + over;
3027 - }
3028 - return n;
3029 -}
3030 -
3031 -#else /* __powerpc64__ */
3032 -
3033 -#define __copy_in_user(to, from, size) \
3034 - __copy_tofrom_user((to), (from), (size))
3035 -
3036 -extern unsigned long copy_from_user(void *to, const void __user *from,
3037 - unsigned long n);
3038 -extern unsigned long copy_to_user(void __user *to, const void *from,
3039 - unsigned long n);
3040 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3041 - unsigned long n);
3042 -
3043 -#endif /* __powerpc64__ */
3044 -
3045 static inline unsigned long __copy_from_user_inatomic(void *to,
3046 const void __user *from, unsigned long n)
3047 {
3048 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3049 if (ret == 0)
3050 return 0;
3051 }
3052 +
3053 + if (!__builtin_constant_p(n))
3054 + check_object_size(to, n, false);
3055 +
3056 return __copy_tofrom_user((__force void __user *)to, from, n);
3057 }
3058
3059 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3060 if (ret == 0)
3061 return 0;
3062 }
3063 +
3064 + if (!__builtin_constant_p(n))
3065 + check_object_size(from, n, true);
3066 +
3067 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3068 }
3069
3070 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3071 return __copy_to_user_inatomic(to, from, size);
3072 }
3073
3074 +#ifndef __powerpc64__
3075 +
3076 +static inline unsigned long __must_check copy_from_user(void *to,
3077 + const void __user *from, unsigned long n)
3078 +{
3079 + unsigned long over;
3080 +
3081 + if ((long)n < 0)
3082 + return n;
3083 +
3084 + if (access_ok(VERIFY_READ, from, n)) {
3085 + if (!__builtin_constant_p(n))
3086 + check_object_size(to, n, false);
3087 + return __copy_tofrom_user((__force void __user *)to, from, n);
3088 + }
3089 + if ((unsigned long)from < TASK_SIZE) {
3090 + over = (unsigned long)from + n - TASK_SIZE;
3091 + if (!__builtin_constant_p(n - over))
3092 + check_object_size(to, n - over, false);
3093 + return __copy_tofrom_user((__force void __user *)to, from,
3094 + n - over) + over;
3095 + }
3096 + return n;
3097 +}
3098 +
3099 +static inline unsigned long __must_check copy_to_user(void __user *to,
3100 + const void *from, unsigned long n)
3101 +{
3102 + unsigned long over;
3103 +
3104 + if ((long)n < 0)
3105 + return n;
3106 +
3107 + if (access_ok(VERIFY_WRITE, to, n)) {
3108 + if (!__builtin_constant_p(n))
3109 + check_object_size(from, n, true);
3110 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3111 + }
3112 + if ((unsigned long)to < TASK_SIZE) {
3113 + over = (unsigned long)to + n - TASK_SIZE;
3114 + if (!__builtin_constant_p(n))
3115 + check_object_size(from, n - over, true);
3116 + return __copy_tofrom_user(to, (__force void __user *)from,
3117 + n - over) + over;
3118 + }
3119 + return n;
3120 +}
3121 +
3122 +#else /* __powerpc64__ */
3123 +
3124 +#define __copy_in_user(to, from, size) \
3125 + __copy_tofrom_user((to), (from), (size))
3126 +
3127 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3128 +{
3129 + if ((long)n < 0 || n > INT_MAX)
3130 + return n;
3131 +
3132 + if (!__builtin_constant_p(n))
3133 + check_object_size(to, n, false);
3134 +
3135 + if (likely(access_ok(VERIFY_READ, from, n)))
3136 + n = __copy_from_user(to, from, n);
3137 + else
3138 + memset(to, 0, n);
3139 + return n;
3140 +}
3141 +
3142 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3143 +{
3144 + if ((long)n < 0 || n > INT_MAX)
3145 + return n;
3146 +
3147 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3148 + if (!__builtin_constant_p(n))
3149 + check_object_size(from, n, true);
3150 + n = __copy_to_user(to, from, n);
3151 + }
3152 + return n;
3153 +}
3154 +
3155 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3156 + unsigned long n);
3157 +
3158 +#endif /* __powerpc64__ */
3159 +
3160 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3161
3162 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3163 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3164 index bb37b1d..01fe9ce 100644
3165 --- a/arch/powerpc/kernel/cacheinfo.c
3166 +++ b/arch/powerpc/kernel/cacheinfo.c
3167 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3168 &cache_assoc_attr,
3169 };
3170
3171 -static struct sysfs_ops cache_index_ops = {
3172 +static const struct sysfs_ops cache_index_ops = {
3173 .show = cache_index_show,
3174 };
3175
3176 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3177 index 37771a5..648530c 100644
3178 --- a/arch/powerpc/kernel/dma-iommu.c
3179 +++ b/arch/powerpc/kernel/dma-iommu.c
3180 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3181 }
3182
3183 /* We support DMA to/from any memory page via the iommu */
3184 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3185 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3186 {
3187 struct iommu_table *tbl = get_iommu_table_base(dev);
3188
3189 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3190 index e96cbbd..bdd6d41 100644
3191 --- a/arch/powerpc/kernel/dma-swiotlb.c
3192 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3193 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3194 * map_page, and unmap_page on highmem, use normal dma_ops
3195 * for everything else.
3196 */
3197 -struct dma_map_ops swiotlb_dma_ops = {
3198 +const struct dma_map_ops swiotlb_dma_ops = {
3199 .alloc_coherent = dma_direct_alloc_coherent,
3200 .free_coherent = dma_direct_free_coherent,
3201 .map_sg = swiotlb_map_sg_attrs,
3202 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3203 index 6215062..ebea59c 100644
3204 --- a/arch/powerpc/kernel/dma.c
3205 +++ b/arch/powerpc/kernel/dma.c
3206 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3207 }
3208 #endif
3209
3210 -struct dma_map_ops dma_direct_ops = {
3211 +const struct dma_map_ops dma_direct_ops = {
3212 .alloc_coherent = dma_direct_alloc_coherent,
3213 .free_coherent = dma_direct_free_coherent,
3214 .map_sg = dma_direct_map_sg,
3215 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3216 index 24dcc0e..a300455 100644
3217 --- a/arch/powerpc/kernel/exceptions-64e.S
3218 +++ b/arch/powerpc/kernel/exceptions-64e.S
3219 @@ -455,6 +455,7 @@ storage_fault_common:
3220 std r14,_DAR(r1)
3221 std r15,_DSISR(r1)
3222 addi r3,r1,STACK_FRAME_OVERHEAD
3223 + bl .save_nvgprs
3224 mr r4,r14
3225 mr r5,r15
3226 ld r14,PACA_EXGEN+EX_R14(r13)
3227 @@ -464,8 +465,7 @@ storage_fault_common:
3228 cmpdi r3,0
3229 bne- 1f
3230 b .ret_from_except_lite
3231 -1: bl .save_nvgprs
3232 - mr r5,r3
3233 +1: mr r5,r3
3234 addi r3,r1,STACK_FRAME_OVERHEAD
3235 ld r4,_DAR(r1)
3236 bl .bad_page_fault
3237 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3238 index 1808876..9fd206a 100644
3239 --- a/arch/powerpc/kernel/exceptions-64s.S
3240 +++ b/arch/powerpc/kernel/exceptions-64s.S
3241 @@ -818,10 +818,10 @@ handle_page_fault:
3242 11: ld r4,_DAR(r1)
3243 ld r5,_DSISR(r1)
3244 addi r3,r1,STACK_FRAME_OVERHEAD
3245 + bl .save_nvgprs
3246 bl .do_page_fault
3247 cmpdi r3,0
3248 beq+ 13f
3249 - bl .save_nvgprs
3250 mr r5,r3
3251 addi r3,r1,STACK_FRAME_OVERHEAD
3252 lwz r4,_DAR(r1)
3253 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3254 index a4c8b38..1b09ad9 100644
3255 --- a/arch/powerpc/kernel/ibmebus.c
3256 +++ b/arch/powerpc/kernel/ibmebus.c
3257 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3258 return 1;
3259 }
3260
3261 -static struct dma_map_ops ibmebus_dma_ops = {
3262 +static const struct dma_map_ops ibmebus_dma_ops = {
3263 .alloc_coherent = ibmebus_alloc_coherent,
3264 .free_coherent = ibmebus_free_coherent,
3265 .map_sg = ibmebus_map_sg,
3266 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3267 index 641c74b..8339ad7 100644
3268 --- a/arch/powerpc/kernel/kgdb.c
3269 +++ b/arch/powerpc/kernel/kgdb.c
3270 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3271 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3272 return 0;
3273
3274 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3275 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3276 regs->nip += 4;
3277
3278 return 1;
3279 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3280 /*
3281 * Global data
3282 */
3283 -struct kgdb_arch arch_kgdb_ops = {
3284 +const struct kgdb_arch arch_kgdb_ops = {
3285 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3286 };
3287
3288 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3289 index 477c663..4f50234 100644
3290 --- a/arch/powerpc/kernel/module.c
3291 +++ b/arch/powerpc/kernel/module.c
3292 @@ -31,11 +31,24 @@
3293
3294 LIST_HEAD(module_bug_list);
3295
3296 +#ifdef CONFIG_PAX_KERNEXEC
3297 void *module_alloc(unsigned long size)
3298 {
3299 if (size == 0)
3300 return NULL;
3301
3302 + return vmalloc(size);
3303 +}
3304 +
3305 +void *module_alloc_exec(unsigned long size)
3306 +#else
3307 +void *module_alloc(unsigned long size)
3308 +#endif
3309 +
3310 +{
3311 + if (size == 0)
3312 + return NULL;
3313 +
3314 return vmalloc_exec(size);
3315 }
3316
3317 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3318 vfree(module_region);
3319 }
3320
3321 +#ifdef CONFIG_PAX_KERNEXEC
3322 +void module_free_exec(struct module *mod, void *module_region)
3323 +{
3324 + module_free(mod, module_region);
3325 +}
3326 +#endif
3327 +
3328 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3329 const Elf_Shdr *sechdrs,
3330 const char *name)
3331 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3332 index f832773..0507238 100644
3333 --- a/arch/powerpc/kernel/module_32.c
3334 +++ b/arch/powerpc/kernel/module_32.c
3335 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3336 me->arch.core_plt_section = i;
3337 }
3338 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3339 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3340 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3341 return -ENOEXEC;
3342 }
3343
3344 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3345
3346 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3347 /* Init, or core PLT? */
3348 - if (location >= mod->module_core
3349 - && location < mod->module_core + mod->core_size)
3350 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3351 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3352 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3353 - else
3354 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3355 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3356 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3357 + else {
3358 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3359 + return ~0UL;
3360 + }
3361
3362 /* Find this entry, or if that fails, the next avail. entry */
3363 while (entry->jump[0]) {
3364 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3365 index cadbed6..b9bbb00 100644
3366 --- a/arch/powerpc/kernel/pci-common.c
3367 +++ b/arch/powerpc/kernel/pci-common.c
3368 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3369 unsigned int ppc_pci_flags = 0;
3370
3371
3372 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3373 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3374
3375 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3376 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3377 {
3378 pci_dma_ops = dma_ops;
3379 }
3380
3381 -struct dma_map_ops *get_pci_dma_ops(void)
3382 +const struct dma_map_ops *get_pci_dma_ops(void)
3383 {
3384 return pci_dma_ops;
3385 }
3386 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3387 index 7b816da..8d5c277 100644
3388 --- a/arch/powerpc/kernel/process.c
3389 +++ b/arch/powerpc/kernel/process.c
3390 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3391 * Lookup NIP late so we have the best change of getting the
3392 * above info out without failing
3393 */
3394 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3395 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3396 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3397 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3398 #endif
3399 show_stack(current, (unsigned long *) regs->gpr[1]);
3400 if (!user_mode(regs))
3401 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3402 newsp = stack[0];
3403 ip = stack[STACK_FRAME_LR_SAVE];
3404 if (!firstframe || ip != lr) {
3405 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3406 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3407 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3408 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3409 - printk(" (%pS)",
3410 + printk(" (%pA)",
3411 (void *)current->ret_stack[curr_frame].ret);
3412 curr_frame--;
3413 }
3414 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3415 struct pt_regs *regs = (struct pt_regs *)
3416 (sp + STACK_FRAME_OVERHEAD);
3417 lr = regs->link;
3418 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3419 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3420 regs->trap, (void *)regs->nip, (void *)lr);
3421 firstframe = 1;
3422 }
3423 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3424 }
3425
3426 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3427 -
3428 -unsigned long arch_align_stack(unsigned long sp)
3429 -{
3430 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3431 - sp -= get_random_int() & ~PAGE_MASK;
3432 - return sp & ~0xf;
3433 -}
3434 -
3435 -static inline unsigned long brk_rnd(void)
3436 -{
3437 - unsigned long rnd = 0;
3438 -
3439 - /* 8MB for 32bit, 1GB for 64bit */
3440 - if (is_32bit_task())
3441 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3442 - else
3443 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3444 -
3445 - return rnd << PAGE_SHIFT;
3446 -}
3447 -
3448 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3449 -{
3450 - unsigned long base = mm->brk;
3451 - unsigned long ret;
3452 -
3453 -#ifdef CONFIG_PPC_STD_MMU_64
3454 - /*
3455 - * If we are using 1TB segments and we are allowed to randomise
3456 - * the heap, we can put it above 1TB so it is backed by a 1TB
3457 - * segment. Otherwise the heap will be in the bottom 1TB
3458 - * which always uses 256MB segments and this may result in a
3459 - * performance penalty.
3460 - */
3461 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3462 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3463 -#endif
3464 -
3465 - ret = PAGE_ALIGN(base + brk_rnd());
3466 -
3467 - if (ret < mm->brk)
3468 - return mm->brk;
3469 -
3470 - return ret;
3471 -}
3472 -
3473 -unsigned long randomize_et_dyn(unsigned long base)
3474 -{
3475 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3476 -
3477 - if (ret < base)
3478 - return base;
3479 -
3480 - return ret;
3481 -}
3482 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3483 index ef14988..856c4bc 100644
3484 --- a/arch/powerpc/kernel/ptrace.c
3485 +++ b/arch/powerpc/kernel/ptrace.c
3486 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3487 /*
3488 * Get contents of register REGNO in task TASK.
3489 */
3490 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3491 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3492 {
3493 if (task->thread.regs == NULL)
3494 return -EIO;
3495 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3496
3497 CHECK_FULL_REGS(child->thread.regs);
3498 if (index < PT_FPR0) {
3499 - tmp = ptrace_get_reg(child, (int) index);
3500 + tmp = ptrace_get_reg(child, index);
3501 } else {
3502 flush_fp_to_thread(child);
3503 tmp = ((unsigned long *)child->thread.fpr)
3504 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3505 index d670429..2bc59b2 100644
3506 --- a/arch/powerpc/kernel/signal_32.c
3507 +++ b/arch/powerpc/kernel/signal_32.c
3508 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3509 /* Save user registers on the stack */
3510 frame = &rt_sf->uc.uc_mcontext;
3511 addr = frame;
3512 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3513 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3514 if (save_user_regs(regs, frame, 0, 1))
3515 goto badframe;
3516 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3517 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3518 index 2fe6fc6..ada0d96 100644
3519 --- a/arch/powerpc/kernel/signal_64.c
3520 +++ b/arch/powerpc/kernel/signal_64.c
3521 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3522 current->thread.fpscr.val = 0;
3523
3524 /* Set up to return from userspace. */
3525 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3526 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3527 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3528 } else {
3529 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3530 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3531 index b97c2d6..dd01a6a 100644
3532 --- a/arch/powerpc/kernel/sys_ppc32.c
3533 +++ b/arch/powerpc/kernel/sys_ppc32.c
3534 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3535 if (oldlenp) {
3536 if (!error) {
3537 if (get_user(oldlen, oldlenp) ||
3538 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3539 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3540 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3541 error = -EFAULT;
3542 }
3543 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3544 }
3545 return error;
3546 }
3547 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3548 index 6f0ae1a..e4b6a56 100644
3549 --- a/arch/powerpc/kernel/traps.c
3550 +++ b/arch/powerpc/kernel/traps.c
3551 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3552 static inline void pmac_backlight_unblank(void) { }
3553 #endif
3554
3555 +extern void gr_handle_kernel_exploit(void);
3556 +
3557 int die(const char *str, struct pt_regs *regs, long err)
3558 {
3559 static struct {
3560 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3561 if (panic_on_oops)
3562 panic("Fatal exception");
3563
3564 + gr_handle_kernel_exploit();
3565 +
3566 oops_exit();
3567 do_exit(err);
3568
3569 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3570 index 137dc22..fe57a79 100644
3571 --- a/arch/powerpc/kernel/vdso.c
3572 +++ b/arch/powerpc/kernel/vdso.c
3573 @@ -36,6 +36,7 @@
3574 #include <asm/firmware.h>
3575 #include <asm/vdso.h>
3576 #include <asm/vdso_datapage.h>
3577 +#include <asm/mman.h>
3578
3579 #include "setup.h"
3580
3581 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3582 vdso_base = VDSO32_MBASE;
3583 #endif
3584
3585 - current->mm->context.vdso_base = 0;
3586 + current->mm->context.vdso_base = ~0UL;
3587
3588 /* vDSO has a problem and was disabled, just don't "enable" it for the
3589 * process
3590 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3591 vdso_base = get_unmapped_area(NULL, vdso_base,
3592 (vdso_pages << PAGE_SHIFT) +
3593 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3594 - 0, 0);
3595 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3596 if (IS_ERR_VALUE(vdso_base)) {
3597 rc = vdso_base;
3598 goto fail_mmapsem;
3599 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3600 index 77f6421..829564a 100644
3601 --- a/arch/powerpc/kernel/vio.c
3602 +++ b/arch/powerpc/kernel/vio.c
3603 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3604 vio_cmo_dealloc(viodev, alloc_size);
3605 }
3606
3607 -struct dma_map_ops vio_dma_mapping_ops = {
3608 +static const struct dma_map_ops vio_dma_mapping_ops = {
3609 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3610 .free_coherent = vio_dma_iommu_free_coherent,
3611 .map_sg = vio_dma_iommu_map_sg,
3612 .unmap_sg = vio_dma_iommu_unmap_sg,
3613 + .dma_supported = dma_iommu_dma_supported,
3614 .map_page = vio_dma_iommu_map_page,
3615 .unmap_page = vio_dma_iommu_unmap_page,
3616
3617 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3618
3619 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3620 {
3621 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3622 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3623 }
3624
3625 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3626 index 5eea6f3..5d10396 100644
3627 --- a/arch/powerpc/lib/usercopy_64.c
3628 +++ b/arch/powerpc/lib/usercopy_64.c
3629 @@ -9,22 +9,6 @@
3630 #include <linux/module.h>
3631 #include <asm/uaccess.h>
3632
3633 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3634 -{
3635 - if (likely(access_ok(VERIFY_READ, from, n)))
3636 - n = __copy_from_user(to, from, n);
3637 - else
3638 - memset(to, 0, n);
3639 - return n;
3640 -}
3641 -
3642 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3643 -{
3644 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3645 - n = __copy_to_user(to, from, n);
3646 - return n;
3647 -}
3648 -
3649 unsigned long copy_in_user(void __user *to, const void __user *from,
3650 unsigned long n)
3651 {
3652 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3653 return n;
3654 }
3655
3656 -EXPORT_SYMBOL(copy_from_user);
3657 -EXPORT_SYMBOL(copy_to_user);
3658 EXPORT_SYMBOL(copy_in_user);
3659
3660 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3661 index e7dae82..877ce0d 100644
3662 --- a/arch/powerpc/mm/fault.c
3663 +++ b/arch/powerpc/mm/fault.c
3664 @@ -30,6 +30,10 @@
3665 #include <linux/kprobes.h>
3666 #include <linux/kdebug.h>
3667 #include <linux/perf_event.h>
3668 +#include <linux/slab.h>
3669 +#include <linux/pagemap.h>
3670 +#include <linux/compiler.h>
3671 +#include <linux/unistd.h>
3672
3673 #include <asm/firmware.h>
3674 #include <asm/page.h>
3675 @@ -40,6 +44,7 @@
3676 #include <asm/uaccess.h>
3677 #include <asm/tlbflush.h>
3678 #include <asm/siginfo.h>
3679 +#include <asm/ptrace.h>
3680
3681
3682 #ifdef CONFIG_KPROBES
3683 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3684 }
3685 #endif
3686
3687 +#ifdef CONFIG_PAX_PAGEEXEC
3688 +/*
3689 + * PaX: decide what to do with offenders (regs->nip = fault address)
3690 + *
3691 + * returns 1 when task should be killed
3692 + */
3693 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3694 +{
3695 + return 1;
3696 +}
3697 +
3698 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3699 +{
3700 + unsigned long i;
3701 +
3702 + printk(KERN_ERR "PAX: bytes at PC: ");
3703 + for (i = 0; i < 5; i++) {
3704 + unsigned int c;
3705 + if (get_user(c, (unsigned int __user *)pc+i))
3706 + printk(KERN_CONT "???????? ");
3707 + else
3708 + printk(KERN_CONT "%08x ", c);
3709 + }
3710 + printk("\n");
3711 +}
3712 +#endif
3713 +
3714 /*
3715 * Check whether the instruction at regs->nip is a store using
3716 * an update addressing form which will update r1.
3717 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3718 * indicate errors in DSISR but can validly be set in SRR1.
3719 */
3720 if (trap == 0x400)
3721 - error_code &= 0x48200000;
3722 + error_code &= 0x58200000;
3723 else
3724 is_write = error_code & DSISR_ISSTORE;
3725 #else
3726 @@ -250,7 +282,7 @@ good_area:
3727 * "undefined". Of those that can be set, this is the only
3728 * one which seems bad.
3729 */
3730 - if (error_code & 0x10000000)
3731 + if (error_code & DSISR_GUARDED)
3732 /* Guarded storage error. */
3733 goto bad_area;
3734 #endif /* CONFIG_8xx */
3735 @@ -265,7 +297,7 @@ good_area:
3736 * processors use the same I/D cache coherency mechanism
3737 * as embedded.
3738 */
3739 - if (error_code & DSISR_PROTFAULT)
3740 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3741 goto bad_area;
3742 #endif /* CONFIG_PPC_STD_MMU */
3743
3744 @@ -335,6 +367,23 @@ bad_area:
3745 bad_area_nosemaphore:
3746 /* User mode accesses cause a SIGSEGV */
3747 if (user_mode(regs)) {
3748 +
3749 +#ifdef CONFIG_PAX_PAGEEXEC
3750 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3751 +#ifdef CONFIG_PPC_STD_MMU
3752 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3753 +#else
3754 + if (is_exec && regs->nip == address) {
3755 +#endif
3756 + switch (pax_handle_fetch_fault(regs)) {
3757 + }
3758 +
3759 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3760 + do_group_exit(SIGKILL);
3761 + }
3762 + }
3763 +#endif
3764 +
3765 _exception(SIGSEGV, regs, code, address);
3766 return 0;
3767 }
3768 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3769 index 5973631..ad617af 100644
3770 --- a/arch/powerpc/mm/mem.c
3771 +++ b/arch/powerpc/mm/mem.c
3772 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3773 {
3774 unsigned long lmb_next_region_start_pfn,
3775 lmb_region_max_pfn;
3776 - int i;
3777 + unsigned int i;
3778
3779 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3780 lmb_region_max_pfn =
3781 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3782 index 0d957a4..26d968f 100644
3783 --- a/arch/powerpc/mm/mmap_64.c
3784 +++ b/arch/powerpc/mm/mmap_64.c
3785 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3786 */
3787 if (mmap_is_legacy()) {
3788 mm->mmap_base = TASK_UNMAPPED_BASE;
3789 +
3790 +#ifdef CONFIG_PAX_RANDMMAP
3791 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3792 + mm->mmap_base += mm->delta_mmap;
3793 +#endif
3794 +
3795 mm->get_unmapped_area = arch_get_unmapped_area;
3796 mm->unmap_area = arch_unmap_area;
3797 } else {
3798 mm->mmap_base = mmap_base();
3799 +
3800 +#ifdef CONFIG_PAX_RANDMMAP
3801 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3802 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3803 +#endif
3804 +
3805 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3806 mm->unmap_area = arch_unmap_area_topdown;
3807 }
3808 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3809 index ba51948..23009d9 100644
3810 --- a/arch/powerpc/mm/slice.c
3811 +++ b/arch/powerpc/mm/slice.c
3812 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3813 if ((mm->task_size - len) < addr)
3814 return 0;
3815 vma = find_vma(mm, addr);
3816 - return (!vma || (addr + len) <= vma->vm_start);
3817 + return check_heap_stack_gap(vma, addr, len);
3818 }
3819
3820 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3821 @@ -256,7 +256,7 @@ full_search:
3822 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3823 continue;
3824 }
3825 - if (!vma || addr + len <= vma->vm_start) {
3826 + if (check_heap_stack_gap(vma, addr, len)) {
3827 /*
3828 * Remember the place where we stopped the search:
3829 */
3830 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3831 }
3832 }
3833
3834 - addr = mm->mmap_base;
3835 - while (addr > len) {
3836 + if (mm->mmap_base < len)
3837 + addr = -ENOMEM;
3838 + else
3839 + addr = mm->mmap_base - len;
3840 +
3841 + while (!IS_ERR_VALUE(addr)) {
3842 /* Go down by chunk size */
3843 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3844 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3845
3846 /* Check for hit with different page size */
3847 mask = slice_range_to_mask(addr, len);
3848 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3849 * return with success:
3850 */
3851 vma = find_vma(mm, addr);
3852 - if (!vma || (addr + len) <= vma->vm_start) {
3853 + if (check_heap_stack_gap(vma, addr, len)) {
3854 /* remember the address as a hint for next time */
3855 if (use_cache)
3856 mm->free_area_cache = addr;
3857 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3858 mm->cached_hole_size = vma->vm_start - addr;
3859
3860 /* try just below the current vma->vm_start */
3861 - addr = vma->vm_start;
3862 + addr = skip_heap_stack_gap(vma, len);
3863 }
3864
3865 /*
3866 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3867 if (fixed && addr > (mm->task_size - len))
3868 return -EINVAL;
3869
3870 +#ifdef CONFIG_PAX_RANDMMAP
3871 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3872 + addr = 0;
3873 +#endif
3874 +
3875 /* If hint, make sure it matches our alignment restrictions */
3876 if (!fixed && addr) {
3877 addr = _ALIGN_UP(addr, 1ul << pshift);
3878 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3879 index b5c753d..8f01abe 100644
3880 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3881 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3882 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3883 lite5200_pm_target_state = PM_SUSPEND_ON;
3884 }
3885
3886 -static struct platform_suspend_ops lite5200_pm_ops = {
3887 +static const struct platform_suspend_ops lite5200_pm_ops = {
3888 .valid = lite5200_pm_valid,
3889 .begin = lite5200_pm_begin,
3890 .prepare = lite5200_pm_prepare,
3891 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3892 index a55b0b6..478c18e 100644
3893 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3894 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3895 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3896 iounmap(mbar);
3897 }
3898
3899 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3900 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3901 .valid = mpc52xx_pm_valid,
3902 .prepare = mpc52xx_pm_prepare,
3903 .enter = mpc52xx_pm_enter,
3904 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3905 index 08e65fc..643d3ac 100644
3906 --- a/arch/powerpc/platforms/83xx/suspend.c
3907 +++ b/arch/powerpc/platforms/83xx/suspend.c
3908 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3909 return ret;
3910 }
3911
3912 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3913 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3914 .valid = mpc83xx_suspend_valid,
3915 .begin = mpc83xx_suspend_begin,
3916 .enter = mpc83xx_suspend_enter,
3917 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3918 index ca5bfdf..1602e09 100644
3919 --- a/arch/powerpc/platforms/cell/iommu.c
3920 +++ b/arch/powerpc/platforms/cell/iommu.c
3921 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3922
3923 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3924
3925 -struct dma_map_ops dma_iommu_fixed_ops = {
3926 +const struct dma_map_ops dma_iommu_fixed_ops = {
3927 .alloc_coherent = dma_fixed_alloc_coherent,
3928 .free_coherent = dma_fixed_free_coherent,
3929 .map_sg = dma_fixed_map_sg,
3930 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3931 index e34b305..20e48ec 100644
3932 --- a/arch/powerpc/platforms/ps3/system-bus.c
3933 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3934 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3935 return mask >= DMA_BIT_MASK(32);
3936 }
3937
3938 -static struct dma_map_ops ps3_sb_dma_ops = {
3939 +static const struct dma_map_ops ps3_sb_dma_ops = {
3940 .alloc_coherent = ps3_alloc_coherent,
3941 .free_coherent = ps3_free_coherent,
3942 .map_sg = ps3_sb_map_sg,
3943 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3944 .unmap_page = ps3_unmap_page,
3945 };
3946
3947 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3948 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3949 .alloc_coherent = ps3_alloc_coherent,
3950 .free_coherent = ps3_free_coherent,
3951 .map_sg = ps3_ioc0_map_sg,
3952 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3953 index f0e6f28..60d53ed 100644
3954 --- a/arch/powerpc/platforms/pseries/Kconfig
3955 +++ b/arch/powerpc/platforms/pseries/Kconfig
3956 @@ -2,6 +2,8 @@ config PPC_PSERIES
3957 depends on PPC64 && PPC_BOOK3S
3958 bool "IBM pSeries & new (POWER5-based) iSeries"
3959 select MPIC
3960 + select PCI_MSI
3961 + select XICS
3962 select PPC_I8259
3963 select PPC_RTAS
3964 select RTAS_ERROR_LOGGING
3965 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3966 index 43c0aca..42c045b 100644
3967 --- a/arch/s390/Kconfig
3968 +++ b/arch/s390/Kconfig
3969 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3970
3971 config S390_SWITCH_AMODE
3972 bool "Switch kernel/user addressing modes"
3973 + default y
3974 help
3975 This option allows to switch the addressing modes of kernel and user
3976 - space. The kernel parameter switch_amode=on will enable this feature,
3977 - default is disabled. Enabling this (via kernel parameter) on machines
3978 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3979 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3980 + will reduce system performance.
3981
3982 Note that this option will also be selected by selecting the execute
3983 - protection option below. Enabling the execute protection via the
3984 - noexec kernel parameter will also switch the addressing modes,
3985 - independent of the switch_amode kernel parameter.
3986 + protection option below. Enabling the execute protection will also
3987 + switch the addressing modes, independent of this option.
3988
3989
3990 config S390_EXEC_PROTECT
3991 bool "Data execute protection"
3992 + default y
3993 select S390_SWITCH_AMODE
3994 help
3995 This option allows to enable a buffer overflow protection for user
3996 space programs and it also selects the addressing mode option above.
3997 - The kernel parameter noexec=on will enable this feature and also
3998 - switch the addressing modes, default is disabled. Enabling this (via
3999 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4000 - will reduce system performance.
4001 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
4002 + reduce system performance.
4003
4004 comment "Code generation options"
4005
4006 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4007 index ae7c8f9..3f01a0c 100644
4008 --- a/arch/s390/include/asm/atomic.h
4009 +++ b/arch/s390/include/asm/atomic.h
4010 @@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4011 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4012 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4013
4014 +#define atomic64_read_unchecked(v) atomic64_read(v)
4015 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4016 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4017 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4018 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4019 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4020 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4021 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4022 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4023 +
4024 #define smp_mb__before_atomic_dec() smp_mb()
4025 #define smp_mb__after_atomic_dec() smp_mb()
4026 #define smp_mb__before_atomic_inc() smp_mb()
4027 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4028 index e885442..e3a2817 100644
4029 --- a/arch/s390/include/asm/elf.h
4030 +++ b/arch/s390/include/asm/elf.h
4031 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4032 that it will "exec", and that there is sufficient room for the brk. */
4033 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4034
4035 +#ifdef CONFIG_PAX_ASLR
4036 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4037 +
4038 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4039 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4040 +#endif
4041 +
4042 /* This yields a mask that user programs can use to figure out what
4043 instruction set this CPU supports. */
4044
4045 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4046 index e37478e..9ce0e9f 100644
4047 --- a/arch/s390/include/asm/setup.h
4048 +++ b/arch/s390/include/asm/setup.h
4049 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
4050 void detect_memory_layout(struct mem_chunk chunk[]);
4051
4052 #ifdef CONFIG_S390_SWITCH_AMODE
4053 -extern unsigned int switch_amode;
4054 +#define switch_amode (1)
4055 #else
4056 #define switch_amode (0)
4057 #endif
4058
4059 #ifdef CONFIG_S390_EXEC_PROTECT
4060 -extern unsigned int s390_noexec;
4061 +#define s390_noexec (1)
4062 #else
4063 #define s390_noexec (0)
4064 #endif
4065 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4066 index 8377e91..e28e6f1 100644
4067 --- a/arch/s390/include/asm/uaccess.h
4068 +++ b/arch/s390/include/asm/uaccess.h
4069 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
4070 copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 might_fault();
4073 +
4074 + if ((long)n < 0)
4075 + return n;
4076 +
4077 if (access_ok(VERIFY_WRITE, to, n))
4078 n = __copy_to_user(to, from, n);
4079 return n;
4080 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4081 static inline unsigned long __must_check
4082 __copy_from_user(void *to, const void __user *from, unsigned long n)
4083 {
4084 + if ((long)n < 0)
4085 + return n;
4086 +
4087 if (__builtin_constant_p(n) && (n <= 256))
4088 return uaccess.copy_from_user_small(n, from, to);
4089 else
4090 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
4091 copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 might_fault();
4094 +
4095 + if ((long)n < 0)
4096 + return n;
4097 +
4098 if (access_ok(VERIFY_READ, from, n))
4099 n = __copy_from_user(to, from, n);
4100 else
4101 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4102 index 639380a..72e3c02 100644
4103 --- a/arch/s390/kernel/module.c
4104 +++ b/arch/s390/kernel/module.c
4105 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4106
4107 /* Increase core size by size of got & plt and set start
4108 offsets for got and plt. */
4109 - me->core_size = ALIGN(me->core_size, 4);
4110 - me->arch.got_offset = me->core_size;
4111 - me->core_size += me->arch.got_size;
4112 - me->arch.plt_offset = me->core_size;
4113 - me->core_size += me->arch.plt_size;
4114 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4115 + me->arch.got_offset = me->core_size_rw;
4116 + me->core_size_rw += me->arch.got_size;
4117 + me->arch.plt_offset = me->core_size_rx;
4118 + me->core_size_rx += me->arch.plt_size;
4119 return 0;
4120 }
4121
4122 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4123 if (info->got_initialized == 0) {
4124 Elf_Addr *gotent;
4125
4126 - gotent = me->module_core + me->arch.got_offset +
4127 + gotent = me->module_core_rw + me->arch.got_offset +
4128 info->got_offset;
4129 *gotent = val;
4130 info->got_initialized = 1;
4131 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4132 else if (r_type == R_390_GOTENT ||
4133 r_type == R_390_GOTPLTENT)
4134 *(unsigned int *) loc =
4135 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4136 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4137 else if (r_type == R_390_GOT64 ||
4138 r_type == R_390_GOTPLT64)
4139 *(unsigned long *) loc = val;
4140 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4141 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4142 if (info->plt_initialized == 0) {
4143 unsigned int *ip;
4144 - ip = me->module_core + me->arch.plt_offset +
4145 + ip = me->module_core_rx + me->arch.plt_offset +
4146 info->plt_offset;
4147 #ifndef CONFIG_64BIT
4148 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4149 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4150 val - loc + 0xffffUL < 0x1ffffeUL) ||
4151 (r_type == R_390_PLT32DBL &&
4152 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4153 - val = (Elf_Addr) me->module_core +
4154 + val = (Elf_Addr) me->module_core_rx +
4155 me->arch.plt_offset +
4156 info->plt_offset;
4157 val += rela->r_addend - loc;
4158 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4159 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4160 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4161 val = val + rela->r_addend -
4162 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4163 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4164 if (r_type == R_390_GOTOFF16)
4165 *(unsigned short *) loc = val;
4166 else if (r_type == R_390_GOTOFF32)
4167 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4168 break;
4169 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4170 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4171 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4172 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4173 rela->r_addend - loc;
4174 if (r_type == R_390_GOTPC)
4175 *(unsigned int *) loc = val;
4176 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4177 index 0b2573a..71a22ec 100644
4178 --- a/arch/s390/kernel/setup.c
4179 +++ b/arch/s390/kernel/setup.c
4180 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4181 early_param("mem", early_parse_mem);
4182
4183 #ifdef CONFIG_S390_SWITCH_AMODE
4184 -unsigned int switch_amode = 0;
4185 -EXPORT_SYMBOL_GPL(switch_amode);
4186 -
4187 static int set_amode_and_uaccess(unsigned long user_amode,
4188 unsigned long user32_amode)
4189 {
4190 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4191 return 0;
4192 }
4193 }
4194 -
4195 -/*
4196 - * Switch kernel/user addressing modes?
4197 - */
4198 -static int __init early_parse_switch_amode(char *p)
4199 -{
4200 - switch_amode = 1;
4201 - return 0;
4202 -}
4203 -early_param("switch_amode", early_parse_switch_amode);
4204 -
4205 #else /* CONFIG_S390_SWITCH_AMODE */
4206 static inline int set_amode_and_uaccess(unsigned long user_amode,
4207 unsigned long user32_amode)
4208 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4209 }
4210 #endif /* CONFIG_S390_SWITCH_AMODE */
4211
4212 -#ifdef CONFIG_S390_EXEC_PROTECT
4213 -unsigned int s390_noexec = 0;
4214 -EXPORT_SYMBOL_GPL(s390_noexec);
4215 -
4216 -/*
4217 - * Enable execute protection?
4218 - */
4219 -static int __init early_parse_noexec(char *p)
4220 -{
4221 - if (!strncmp(p, "off", 3))
4222 - return 0;
4223 - switch_amode = 1;
4224 - s390_noexec = 1;
4225 - return 0;
4226 -}
4227 -early_param("noexec", early_parse_noexec);
4228 -#endif /* CONFIG_S390_EXEC_PROTECT */
4229 -
4230 static void setup_addressing_mode(void)
4231 {
4232 if (s390_noexec) {
4233 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4234 index 0ab74ae..c8b68f9 100644
4235 --- a/arch/s390/mm/mmap.c
4236 +++ b/arch/s390/mm/mmap.c
4237 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4238 */
4239 if (mmap_is_legacy()) {
4240 mm->mmap_base = TASK_UNMAPPED_BASE;
4241 +
4242 +#ifdef CONFIG_PAX_RANDMMAP
4243 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4244 + mm->mmap_base += mm->delta_mmap;
4245 +#endif
4246 +
4247 mm->get_unmapped_area = arch_get_unmapped_area;
4248 mm->unmap_area = arch_unmap_area;
4249 } else {
4250 mm->mmap_base = mmap_base();
4251 +
4252 +#ifdef CONFIG_PAX_RANDMMAP
4253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4254 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4255 +#endif
4256 +
4257 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4258 mm->unmap_area = arch_unmap_area_topdown;
4259 }
4260 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4261 */
4262 if (mmap_is_legacy()) {
4263 mm->mmap_base = TASK_UNMAPPED_BASE;
4264 +
4265 +#ifdef CONFIG_PAX_RANDMMAP
4266 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4267 + mm->mmap_base += mm->delta_mmap;
4268 +#endif
4269 +
4270 mm->get_unmapped_area = s390_get_unmapped_area;
4271 mm->unmap_area = arch_unmap_area;
4272 } else {
4273 mm->mmap_base = mmap_base();
4274 +
4275 +#ifdef CONFIG_PAX_RANDMMAP
4276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4277 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4278 +#endif
4279 +
4280 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4281 mm->unmap_area = arch_unmap_area_topdown;
4282 }
4283 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4284 index 589d5c7..669e274 100644
4285 --- a/arch/score/include/asm/system.h
4286 +++ b/arch/score/include/asm/system.h
4287 @@ -17,7 +17,7 @@ do { \
4288 #define finish_arch_switch(prev) do {} while (0)
4289
4290 typedef void (*vi_handler_t)(void);
4291 -extern unsigned long arch_align_stack(unsigned long sp);
4292 +#define arch_align_stack(x) (x)
4293
4294 #define mb() barrier()
4295 #define rmb() barrier()
4296 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4297 index 25d0803..d6c8e36 100644
4298 --- a/arch/score/kernel/process.c
4299 +++ b/arch/score/kernel/process.c
4300 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4301
4302 return task_pt_regs(task)->cp0_epc;
4303 }
4304 -
4305 -unsigned long arch_align_stack(unsigned long sp)
4306 -{
4307 - return sp;
4308 -}
4309 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4310 index d936c1a..304a252 100644
4311 --- a/arch/sh/boards/mach-hp6xx/pm.c
4312 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4313 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4314 return 0;
4315 }
4316
4317 -static struct platform_suspend_ops hp6x0_pm_ops = {
4318 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4319 .enter = hp6x0_pm_enter,
4320 .valid = suspend_valid_only_mem,
4321 };
4322 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4323 index 8a8a993..7b3079b 100644
4324 --- a/arch/sh/kernel/cpu/sh4/sq.c
4325 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4326 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4327 NULL,
4328 };
4329
4330 -static struct sysfs_ops sq_sysfs_ops = {
4331 +static const struct sysfs_ops sq_sysfs_ops = {
4332 .show = sq_sysfs_show,
4333 .store = sq_sysfs_store,
4334 };
4335 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4336 index ee3c2aa..c49cee6 100644
4337 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4338 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4339 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4340 return 0;
4341 }
4342
4343 -static struct platform_suspend_ops sh_pm_ops = {
4344 +static const struct platform_suspend_ops sh_pm_ops = {
4345 .enter = sh_pm_enter,
4346 .valid = suspend_valid_only_mem,
4347 };
4348 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4349 index 3e532d0..9faa306 100644
4350 --- a/arch/sh/kernel/kgdb.c
4351 +++ b/arch/sh/kernel/kgdb.c
4352 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4353 {
4354 }
4355
4356 -struct kgdb_arch arch_kgdb_ops = {
4357 +const struct kgdb_arch arch_kgdb_ops = {
4358 /* Breakpoint instruction: trapa #0x3c */
4359 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4360 .gdb_bpt_instr = { 0x3c, 0xc3 },
4361 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4362 index afeb710..d1d1289 100644
4363 --- a/arch/sh/mm/mmap.c
4364 +++ b/arch/sh/mm/mmap.c
4365 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4366 addr = PAGE_ALIGN(addr);
4367
4368 vma = find_vma(mm, addr);
4369 - if (TASK_SIZE - len >= addr &&
4370 - (!vma || addr + len <= vma->vm_start))
4371 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4372 return addr;
4373 }
4374
4375 @@ -106,7 +105,7 @@ full_search:
4376 }
4377 return -ENOMEM;
4378 }
4379 - if (likely(!vma || addr + len <= vma->vm_start)) {
4380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4381 /*
4382 * Remember the place where we stopped the search:
4383 */
4384 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4385 addr = PAGE_ALIGN(addr);
4386
4387 vma = find_vma(mm, addr);
4388 - if (TASK_SIZE - len >= addr &&
4389 - (!vma || addr + len <= vma->vm_start))
4390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4391 return addr;
4392 }
4393
4394 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4395 /* make sure it can fit in the remaining address space */
4396 if (likely(addr > len)) {
4397 vma = find_vma(mm, addr-len);
4398 - if (!vma || addr <= vma->vm_start) {
4399 + if (check_heap_stack_gap(vma, addr - len, len)) {
4400 /* remember the address as a hint for next time */
4401 return (mm->free_area_cache = addr-len);
4402 }
4403 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4404 if (unlikely(mm->mmap_base < len))
4405 goto bottomup;
4406
4407 - addr = mm->mmap_base-len;
4408 - if (do_colour_align)
4409 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4410 + addr = mm->mmap_base - len;
4411
4412 do {
4413 + if (do_colour_align)
4414 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4415 /*
4416 * Lookup failure means no vma is above this address,
4417 * else if new region fits below vma->vm_start,
4418 * return with success:
4419 */
4420 vma = find_vma(mm, addr);
4421 - if (likely(!vma || addr+len <= vma->vm_start)) {
4422 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4423 /* remember the address as a hint for next time */
4424 return (mm->free_area_cache = addr);
4425 }
4426 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4427 mm->cached_hole_size = vma->vm_start - addr;
4428
4429 /* try just below the current vma->vm_start */
4430 - addr = vma->vm_start-len;
4431 - if (do_colour_align)
4432 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 - } while (likely(len < vma->vm_start));
4434 + addr = skip_heap_stack_gap(vma, len);
4435 + } while (!IS_ERR_VALUE(addr));
4436
4437 bottomup:
4438 /*
4439 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4440 index 05ef538..dc9c857 100644
4441 --- a/arch/sparc/Kconfig
4442 +++ b/arch/sparc/Kconfig
4443 @@ -32,6 +32,7 @@ config SPARC
4444
4445 config SPARC32
4446 def_bool !64BIT
4447 + select GENERIC_ATOMIC64
4448
4449 config SPARC64
4450 def_bool 64BIT
4451 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4452 index 113225b..7fd04e7 100644
4453 --- a/arch/sparc/Makefile
4454 +++ b/arch/sparc/Makefile
4455 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4456 # Export what is needed by arch/sparc/boot/Makefile
4457 export VMLINUX_INIT VMLINUX_MAIN
4458 VMLINUX_INIT := $(head-y) $(init-y)
4459 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4460 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4461 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4462 VMLINUX_MAIN += $(drivers-y) $(net-y)
4463
4464 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4465 index 8ff83d8..4a459c2 100644
4466 --- a/arch/sparc/include/asm/atomic.h
4467 +++ b/arch/sparc/include/asm/atomic.h
4468 @@ -4,5 +4,6 @@
4469 #include <asm/atomic_64.h>
4470 #else
4471 #include <asm/atomic_32.h>
4472 +#include <asm-generic/atomic64.h>
4473 #endif
4474 #endif
4475 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4476 index f5cc06f..f858d47 100644
4477 --- a/arch/sparc/include/asm/atomic_64.h
4478 +++ b/arch/sparc/include/asm/atomic_64.h
4479 @@ -14,18 +14,40 @@
4480 #define ATOMIC64_INIT(i) { (i) }
4481
4482 #define atomic_read(v) ((v)->counter)
4483 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4484 +{
4485 + return v->counter;
4486 +}
4487 #define atomic64_read(v) ((v)->counter)
4488 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4489 +{
4490 + return v->counter;
4491 +}
4492
4493 #define atomic_set(v, i) (((v)->counter) = i)
4494 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4495 +{
4496 + v->counter = i;
4497 +}
4498 #define atomic64_set(v, i) (((v)->counter) = i)
4499 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4500 +{
4501 + v->counter = i;
4502 +}
4503
4504 extern void atomic_add(int, atomic_t *);
4505 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4506 extern void atomic64_add(long, atomic64_t *);
4507 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4508 extern void atomic_sub(int, atomic_t *);
4509 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4510 extern void atomic64_sub(long, atomic64_t *);
4511 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4512
4513 extern int atomic_add_ret(int, atomic_t *);
4514 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4515 extern long atomic64_add_ret(long, atomic64_t *);
4516 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4517 extern int atomic_sub_ret(int, atomic_t *);
4518 extern long atomic64_sub_ret(long, atomic64_t *);
4519
4520 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4521 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4522
4523 #define atomic_inc_return(v) atomic_add_ret(1, v)
4524 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4525 +{
4526 + return atomic_add_ret_unchecked(1, v);
4527 +}
4528 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4529 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4530 +{
4531 + return atomic64_add_ret_unchecked(1, v);
4532 +}
4533
4534 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4535 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4536
4537 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4538 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4539 +{
4540 + return atomic_add_ret_unchecked(i, v);
4541 +}
4542 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4543 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4544 +{
4545 + return atomic64_add_ret_unchecked(i, v);
4546 +}
4547
4548 /*
4549 * atomic_inc_and_test - increment and test
4550 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4551 * other cases.
4552 */
4553 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4554 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4555 +{
4556 + return atomic_inc_return_unchecked(v) == 0;
4557 +}
4558 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4559
4560 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4561 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4562 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4563
4564 #define atomic_inc(v) atomic_add(1, v)
4565 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4566 +{
4567 + atomic_add_unchecked(1, v);
4568 +}
4569 #define atomic64_inc(v) atomic64_add(1, v)
4570 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4571 +{
4572 + atomic64_add_unchecked(1, v);
4573 +}
4574
4575 #define atomic_dec(v) atomic_sub(1, v)
4576 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4577 +{
4578 + atomic_sub_unchecked(1, v);
4579 +}
4580 #define atomic64_dec(v) atomic64_sub(1, v)
4581 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4582 +{
4583 + atomic64_sub_unchecked(1, v);
4584 +}
4585
4586 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4587 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4588
4589 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4590 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4591 +{
4592 + return cmpxchg(&v->counter, old, new);
4593 +}
4594 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4595 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4596 +{
4597 + return xchg(&v->counter, new);
4598 +}
4599
4600 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4601 {
4602 - int c, old;
4603 + int c, old, new;
4604 c = atomic_read(v);
4605 for (;;) {
4606 - if (unlikely(c == (u)))
4607 + if (unlikely(c == u))
4608 break;
4609 - old = atomic_cmpxchg((v), c, c + (a));
4610 +
4611 + asm volatile("addcc %2, %0, %0\n"
4612 +
4613 +#ifdef CONFIG_PAX_REFCOUNT
4614 + "tvs %%icc, 6\n"
4615 +#endif
4616 +
4617 + : "=r" (new)
4618 + : "0" (c), "ir" (a)
4619 + : "cc");
4620 +
4621 + old = atomic_cmpxchg(v, c, new);
4622 if (likely(old == c))
4623 break;
4624 c = old;
4625 }
4626 - return c != (u);
4627 + return c != u;
4628 }
4629
4630 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4631 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4632 #define atomic64_cmpxchg(v, o, n) \
4633 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4634 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4635 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4636 +{
4637 + return xchg(&v->counter, new);
4638 +}
4639
4640 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4641 {
4642 - long c, old;
4643 + long c, old, new;
4644 c = atomic64_read(v);
4645 for (;;) {
4646 - if (unlikely(c == (u)))
4647 + if (unlikely(c == u))
4648 break;
4649 - old = atomic64_cmpxchg((v), c, c + (a));
4650 +
4651 + asm volatile("addcc %2, %0, %0\n"
4652 +
4653 +#ifdef CONFIG_PAX_REFCOUNT
4654 + "tvs %%xcc, 6\n"
4655 +#endif
4656 +
4657 + : "=r" (new)
4658 + : "0" (c), "ir" (a)
4659 + : "cc");
4660 +
4661 + old = atomic64_cmpxchg(v, c, new);
4662 if (likely(old == c))
4663 break;
4664 c = old;
4665 }
4666 - return c != (u);
4667 + return c != u;
4668 }
4669
4670 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4671 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4672 index 41f85ae..fb54d5e 100644
4673 --- a/arch/sparc/include/asm/cache.h
4674 +++ b/arch/sparc/include/asm/cache.h
4675 @@ -8,7 +8,7 @@
4676 #define _SPARC_CACHE_H
4677
4678 #define L1_CACHE_SHIFT 5
4679 -#define L1_CACHE_BYTES 32
4680 +#define L1_CACHE_BYTES 32UL
4681 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4682
4683 #ifdef CONFIG_SPARC32
4684 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4685 index 5a8c308..38def92 100644
4686 --- a/arch/sparc/include/asm/dma-mapping.h
4687 +++ b/arch/sparc/include/asm/dma-mapping.h
4688 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4689 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4690 #define dma_is_consistent(d, h) (1)
4691
4692 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4693 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4694 extern struct bus_type pci_bus_type;
4695
4696 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4697 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4698 {
4699 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4700 if (dev->bus == &pci_bus_type)
4701 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4702 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4703 dma_addr_t *dma_handle, gfp_t flag)
4704 {
4705 - struct dma_map_ops *ops = get_dma_ops(dev);
4706 + const struct dma_map_ops *ops = get_dma_ops(dev);
4707 void *cpu_addr;
4708
4709 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4710 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4711 static inline void dma_free_coherent(struct device *dev, size_t size,
4712 void *cpu_addr, dma_addr_t dma_handle)
4713 {
4714 - struct dma_map_ops *ops = get_dma_ops(dev);
4715 + const struct dma_map_ops *ops = get_dma_ops(dev);
4716
4717 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4718 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4719 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4720 index 381a1b5..b97e3ff 100644
4721 --- a/arch/sparc/include/asm/elf_32.h
4722 +++ b/arch/sparc/include/asm/elf_32.h
4723 @@ -116,6 +116,13 @@ typedef struct {
4724
4725 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4726
4727 +#ifdef CONFIG_PAX_ASLR
4728 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4729 +
4730 +#define PAX_DELTA_MMAP_LEN 16
4731 +#define PAX_DELTA_STACK_LEN 16
4732 +#endif
4733 +
4734 /* This yields a mask that user programs can use to figure out what
4735 instruction set this cpu supports. This can NOT be done in userspace
4736 on Sparc. */
4737 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4738 index 9968085..c2106ef 100644
4739 --- a/arch/sparc/include/asm/elf_64.h
4740 +++ b/arch/sparc/include/asm/elf_64.h
4741 @@ -163,6 +163,12 @@ typedef struct {
4742 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4743 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4744
4745 +#ifdef CONFIG_PAX_ASLR
4746 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4747 +
4748 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4749 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4750 +#endif
4751
4752 /* This yields a mask that user programs can use to figure out what
4753 instruction set this cpu supports. */
4754 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4755 index 156707b..aefa786 100644
4756 --- a/arch/sparc/include/asm/page_32.h
4757 +++ b/arch/sparc/include/asm/page_32.h
4758 @@ -8,6 +8,8 @@
4759 #ifndef _SPARC_PAGE_H
4760 #define _SPARC_PAGE_H
4761
4762 +#include <linux/const.h>
4763 +
4764 #define PAGE_SHIFT 12
4765
4766 #ifndef __ASSEMBLY__
4767 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4768 index e0cabe7..efd60f1 100644
4769 --- a/arch/sparc/include/asm/pgtable_32.h
4770 +++ b/arch/sparc/include/asm/pgtable_32.h
4771 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4772 BTFIXUPDEF_INT(page_none)
4773 BTFIXUPDEF_INT(page_copy)
4774 BTFIXUPDEF_INT(page_readonly)
4775 +
4776 +#ifdef CONFIG_PAX_PAGEEXEC
4777 +BTFIXUPDEF_INT(page_shared_noexec)
4778 +BTFIXUPDEF_INT(page_copy_noexec)
4779 +BTFIXUPDEF_INT(page_readonly_noexec)
4780 +#endif
4781 +
4782 BTFIXUPDEF_INT(page_kernel)
4783
4784 #define PMD_SHIFT SUN4C_PMD_SHIFT
4785 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4786 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4787 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4788
4789 +#ifdef CONFIG_PAX_PAGEEXEC
4790 +extern pgprot_t PAGE_SHARED_NOEXEC;
4791 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4792 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4793 +#else
4794 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4795 +# define PAGE_COPY_NOEXEC PAGE_COPY
4796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4797 +#endif
4798 +
4799 extern unsigned long page_kernel;
4800
4801 #ifdef MODULE
4802 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4803 index 1407c07..7e10231 100644
4804 --- a/arch/sparc/include/asm/pgtsrmmu.h
4805 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4806 @@ -115,6 +115,13 @@
4807 SRMMU_EXEC | SRMMU_REF)
4808 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4809 SRMMU_EXEC | SRMMU_REF)
4810 +
4811 +#ifdef CONFIG_PAX_PAGEEXEC
4812 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4813 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4814 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4815 +#endif
4816 +
4817 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4818 SRMMU_DIRTY | SRMMU_REF)
4819
4820 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4821 index 43e5147..47622a1 100644
4822 --- a/arch/sparc/include/asm/spinlock_64.h
4823 +++ b/arch/sparc/include/asm/spinlock_64.h
4824 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4825
4826 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4827
4828 -static void inline arch_read_lock(raw_rwlock_t *lock)
4829 +static inline void arch_read_lock(raw_rwlock_t *lock)
4830 {
4831 unsigned long tmp1, tmp2;
4832
4833 __asm__ __volatile__ (
4834 "1: ldsw [%2], %0\n"
4835 " brlz,pn %0, 2f\n"
4836 -"4: add %0, 1, %1\n"
4837 +"4: addcc %0, 1, %1\n"
4838 +
4839 +#ifdef CONFIG_PAX_REFCOUNT
4840 +" tvs %%icc, 6\n"
4841 +#endif
4842 +
4843 " cas [%2], %0, %1\n"
4844 " cmp %0, %1\n"
4845 " bne,pn %%icc, 1b\n"
4846 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4847 " .previous"
4848 : "=&r" (tmp1), "=&r" (tmp2)
4849 : "r" (lock)
4850 - : "memory");
4851 + : "memory", "cc");
4852 }
4853
4854 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4855 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4856 {
4857 int tmp1, tmp2;
4858
4859 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4860 "1: ldsw [%2], %0\n"
4861 " brlz,a,pn %0, 2f\n"
4862 " mov 0, %0\n"
4863 -" add %0, 1, %1\n"
4864 +" addcc %0, 1, %1\n"
4865 +
4866 +#ifdef CONFIG_PAX_REFCOUNT
4867 +" tvs %%icc, 6\n"
4868 +#endif
4869 +
4870 " cas [%2], %0, %1\n"
4871 " cmp %0, %1\n"
4872 " bne,pn %%icc, 1b\n"
4873 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4874 return tmp1;
4875 }
4876
4877 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4878 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4879 {
4880 unsigned long tmp1, tmp2;
4881
4882 __asm__ __volatile__(
4883 "1: lduw [%2], %0\n"
4884 -" sub %0, 1, %1\n"
4885 +" subcc %0, 1, %1\n"
4886 +
4887 +#ifdef CONFIG_PAX_REFCOUNT
4888 +" tvs %%icc, 6\n"
4889 +#endif
4890 +
4891 " cas [%2], %0, %1\n"
4892 " cmp %0, %1\n"
4893 " bne,pn %%xcc, 1b\n"
4894 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4895 : "memory");
4896 }
4897
4898 -static void inline arch_write_lock(raw_rwlock_t *lock)
4899 +static inline void arch_write_lock(raw_rwlock_t *lock)
4900 {
4901 unsigned long mask, tmp1, tmp2;
4902
4903 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4904 : "memory");
4905 }
4906
4907 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4908 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4909 {
4910 __asm__ __volatile__(
4911 " stw %%g0, [%0]"
4912 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4913 : "memory");
4914 }
4915
4916 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4917 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4918 {
4919 unsigned long mask, tmp1, tmp2, result;
4920
4921 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4922 index 844d73a..f787fb9 100644
4923 --- a/arch/sparc/include/asm/thread_info_32.h
4924 +++ b/arch/sparc/include/asm/thread_info_32.h
4925 @@ -50,6 +50,8 @@ struct thread_info {
4926 unsigned long w_saved;
4927
4928 struct restart_block restart_block;
4929 +
4930 + unsigned long lowest_stack;
4931 };
4932
4933 /*
4934 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4935 index f78ad9a..9f55fc7 100644
4936 --- a/arch/sparc/include/asm/thread_info_64.h
4937 +++ b/arch/sparc/include/asm/thread_info_64.h
4938 @@ -68,6 +68,8 @@ struct thread_info {
4939 struct pt_regs *kern_una_regs;
4940 unsigned int kern_una_insn;
4941
4942 + unsigned long lowest_stack;
4943 +
4944 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4945 };
4946
4947 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4948 index e88fbe5..96b0ce5 100644
4949 --- a/arch/sparc/include/asm/uaccess.h
4950 +++ b/arch/sparc/include/asm/uaccess.h
4951 @@ -1,5 +1,13 @@
4952 #ifndef ___ASM_SPARC_UACCESS_H
4953 #define ___ASM_SPARC_UACCESS_H
4954 +
4955 +#ifdef __KERNEL__
4956 +#ifndef __ASSEMBLY__
4957 +#include <linux/types.h>
4958 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4959 +#endif
4960 +#endif
4961 +
4962 #if defined(__sparc__) && defined(__arch64__)
4963 #include <asm/uaccess_64.h>
4964 #else
4965 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4966 index 8303ac4..07f333d 100644
4967 --- a/arch/sparc/include/asm/uaccess_32.h
4968 +++ b/arch/sparc/include/asm/uaccess_32.h
4969 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4970
4971 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4972 {
4973 - if (n && __access_ok((unsigned long) to, n))
4974 + if ((long)n < 0)
4975 + return n;
4976 +
4977 + if (n && __access_ok((unsigned long) to, n)) {
4978 + if (!__builtin_constant_p(n))
4979 + check_object_size(from, n, true);
4980 return __copy_user(to, (__force void __user *) from, n);
4981 - else
4982 + } else
4983 return n;
4984 }
4985
4986 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4987 {
4988 + if ((long)n < 0)
4989 + return n;
4990 +
4991 + if (!__builtin_constant_p(n))
4992 + check_object_size(from, n, true);
4993 +
4994 return __copy_user(to, (__force void __user *) from, n);
4995 }
4996
4997 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4998 {
4999 - if (n && __access_ok((unsigned long) from, n))
5000 + if ((long)n < 0)
5001 + return n;
5002 +
5003 + if (n && __access_ok((unsigned long) from, n)) {
5004 + if (!__builtin_constant_p(n))
5005 + check_object_size(to, n, false);
5006 return __copy_user((__force void __user *) to, from, n);
5007 - else
5008 + } else
5009 return n;
5010 }
5011
5012 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5013 {
5014 + if ((long)n < 0)
5015 + return n;
5016 +
5017 return __copy_user((__force void __user *) to, from, n);
5018 }
5019
5020 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5021 index 9ea271e..7b8a271 100644
5022 --- a/arch/sparc/include/asm/uaccess_64.h
5023 +++ b/arch/sparc/include/asm/uaccess_64.h
5024 @@ -9,6 +9,7 @@
5025 #include <linux/compiler.h>
5026 #include <linux/string.h>
5027 #include <linux/thread_info.h>
5028 +#include <linux/kernel.h>
5029 #include <asm/asi.h>
5030 #include <asm/system.h>
5031 #include <asm/spitfire.h>
5032 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5033 static inline unsigned long __must_check
5034 copy_from_user(void *to, const void __user *from, unsigned long size)
5035 {
5036 - unsigned long ret = ___copy_from_user(to, from, size);
5037 + unsigned long ret;
5038
5039 + if ((long)size < 0 || size > INT_MAX)
5040 + return size;
5041 +
5042 + if (!__builtin_constant_p(size))
5043 + check_object_size(to, size, false);
5044 +
5045 + ret = ___copy_from_user(to, from, size);
5046 if (unlikely(ret))
5047 ret = copy_from_user_fixup(to, from, size);
5048 return ret;
5049 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5050 static inline unsigned long __must_check
5051 copy_to_user(void __user *to, const void *from, unsigned long size)
5052 {
5053 - unsigned long ret = ___copy_to_user(to, from, size);
5054 + unsigned long ret;
5055
5056 + if ((long)size < 0 || size > INT_MAX)
5057 + return size;
5058 +
5059 + if (!__builtin_constant_p(size))
5060 + check_object_size(from, size, true);
5061 +
5062 + ret = ___copy_to_user(to, from, size);
5063 if (unlikely(ret))
5064 ret = copy_to_user_fixup(to, from, size);
5065 return ret;
5066 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5067 index 2782681..77ded84 100644
5068 --- a/arch/sparc/kernel/Makefile
5069 +++ b/arch/sparc/kernel/Makefile
5070 @@ -3,7 +3,7 @@
5071 #
5072
5073 asflags-y := -ansi
5074 -ccflags-y := -Werror
5075 +#ccflags-y := -Werror
5076
5077 extra-y := head_$(BITS).o
5078 extra-y += init_task.o
5079 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5080 index 7690cc2..ece64c9 100644
5081 --- a/arch/sparc/kernel/iommu.c
5082 +++ b/arch/sparc/kernel/iommu.c
5083 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5084 spin_unlock_irqrestore(&iommu->lock, flags);
5085 }
5086
5087 -static struct dma_map_ops sun4u_dma_ops = {
5088 +static const struct dma_map_ops sun4u_dma_ops = {
5089 .alloc_coherent = dma_4u_alloc_coherent,
5090 .free_coherent = dma_4u_free_coherent,
5091 .map_page = dma_4u_map_page,
5092 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5093 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5094 };
5095
5096 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5097 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5098 EXPORT_SYMBOL(dma_ops);
5099
5100 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5101 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5102 index 9f61fd8..bd048db 100644
5103 --- a/arch/sparc/kernel/ioport.c
5104 +++ b/arch/sparc/kernel/ioport.c
5105 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5106 BUG();
5107 }
5108
5109 -struct dma_map_ops sbus_dma_ops = {
5110 +const struct dma_map_ops sbus_dma_ops = {
5111 .alloc_coherent = sbus_alloc_coherent,
5112 .free_coherent = sbus_free_coherent,
5113 .map_page = sbus_map_page,
5114 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5115 .sync_sg_for_device = sbus_sync_sg_for_device,
5116 };
5117
5118 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
5119 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5120 EXPORT_SYMBOL(dma_ops);
5121
5122 static int __init sparc_register_ioport(void)
5123 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5124 }
5125 }
5126
5127 -struct dma_map_ops pci32_dma_ops = {
5128 +const struct dma_map_ops pci32_dma_ops = {
5129 .alloc_coherent = pci32_alloc_coherent,
5130 .free_coherent = pci32_free_coherent,
5131 .map_page = pci32_map_page,
5132 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5133 index 04df4ed..55c4b6e 100644
5134 --- a/arch/sparc/kernel/kgdb_32.c
5135 +++ b/arch/sparc/kernel/kgdb_32.c
5136 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5137 {
5138 }
5139
5140 -struct kgdb_arch arch_kgdb_ops = {
5141 +const struct kgdb_arch arch_kgdb_ops = {
5142 /* Breakpoint instruction: ta 0x7d */
5143 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5144 };
5145 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5146 index f5a0fd4..d886f71 100644
5147 --- a/arch/sparc/kernel/kgdb_64.c
5148 +++ b/arch/sparc/kernel/kgdb_64.c
5149 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5150 {
5151 }
5152
5153 -struct kgdb_arch arch_kgdb_ops = {
5154 +const struct kgdb_arch arch_kgdb_ops = {
5155 /* Breakpoint instruction: ta 0x72 */
5156 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5157 };
5158 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5159 index 23c33ff..d137fbd 100644
5160 --- a/arch/sparc/kernel/pci_sun4v.c
5161 +++ b/arch/sparc/kernel/pci_sun4v.c
5162 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5163 spin_unlock_irqrestore(&iommu->lock, flags);
5164 }
5165
5166 -static struct dma_map_ops sun4v_dma_ops = {
5167 +static const struct dma_map_ops sun4v_dma_ops = {
5168 .alloc_coherent = dma_4v_alloc_coherent,
5169 .free_coherent = dma_4v_free_coherent,
5170 .map_page = dma_4v_map_page,
5171 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5172 index c49865b..b41a81b 100644
5173 --- a/arch/sparc/kernel/process_32.c
5174 +++ b/arch/sparc/kernel/process_32.c
5175 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5176 rw->ins[4], rw->ins[5],
5177 rw->ins[6],
5178 rw->ins[7]);
5179 - printk("%pS\n", (void *) rw->ins[7]);
5180 + printk("%pA\n", (void *) rw->ins[7]);
5181 rw = (struct reg_window32 *) rw->ins[6];
5182 }
5183 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5184 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5185
5186 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5187 r->psr, r->pc, r->npc, r->y, print_tainted());
5188 - printk("PC: <%pS>\n", (void *) r->pc);
5189 + printk("PC: <%pA>\n", (void *) r->pc);
5190 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5191 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5192 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5193 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5194 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5195 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5196 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5197 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5198
5199 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5200 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5201 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5202 rw = (struct reg_window32 *) fp;
5203 pc = rw->ins[7];
5204 printk("[%08lx : ", pc);
5205 - printk("%pS ] ", (void *) pc);
5206 + printk("%pA ] ", (void *) pc);
5207 fp = rw->ins[6];
5208 } while (++count < 16);
5209 printk("\n");
5210 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5211 index cb70476..3d0c191 100644
5212 --- a/arch/sparc/kernel/process_64.c
5213 +++ b/arch/sparc/kernel/process_64.c
5214 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5215 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5216 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5217 if (regs->tstate & TSTATE_PRIV)
5218 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5219 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5220 }
5221
5222 void show_regs(struct pt_regs *regs)
5223 {
5224 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5225 regs->tpc, regs->tnpc, regs->y, print_tainted());
5226 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5227 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5228 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5229 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5230 regs->u_regs[3]);
5231 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5232 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5233 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5234 regs->u_regs[15]);
5235 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5236 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5237 show_regwindow(regs);
5238 }
5239
5240 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5241 ((tp && tp->task) ? tp->task->pid : -1));
5242
5243 if (gp->tstate & TSTATE_PRIV) {
5244 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5245 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5246 (void *) gp->tpc,
5247 (void *) gp->o7,
5248 (void *) gp->i7,
5249 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5250 index 6edc4e5..06a69b4 100644
5251 --- a/arch/sparc/kernel/sigutil_64.c
5252 +++ b/arch/sparc/kernel/sigutil_64.c
5253 @@ -2,6 +2,7 @@
5254 #include <linux/types.h>
5255 #include <linux/thread_info.h>
5256 #include <linux/uaccess.h>
5257 +#include <linux/errno.h>
5258
5259 #include <asm/sigcontext.h>
5260 #include <asm/fpumacro.h>
5261 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5262 index 3a82e65..ce0a53a 100644
5263 --- a/arch/sparc/kernel/sys_sparc_32.c
5264 +++ b/arch/sparc/kernel/sys_sparc_32.c
5265 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5266 if (ARCH_SUN4C && len > 0x20000000)
5267 return -ENOMEM;
5268 if (!addr)
5269 - addr = TASK_UNMAPPED_BASE;
5270 + addr = current->mm->mmap_base;
5271
5272 if (flags & MAP_SHARED)
5273 addr = COLOUR_ALIGN(addr);
5274 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5275 }
5276 if (TASK_SIZE - PAGE_SIZE - len < addr)
5277 return -ENOMEM;
5278 - if (!vmm || addr + len <= vmm->vm_start)
5279 + if (check_heap_stack_gap(vmm, addr, len))
5280 return addr;
5281 addr = vmm->vm_end;
5282 if (flags & MAP_SHARED)
5283 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5284 index cfa0e19..98972ac 100644
5285 --- a/arch/sparc/kernel/sys_sparc_64.c
5286 +++ b/arch/sparc/kernel/sys_sparc_64.c
5287 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5288 /* We do not accept a shared mapping if it would violate
5289 * cache aliasing constraints.
5290 */
5291 - if ((flags & MAP_SHARED) &&
5292 + if ((filp || (flags & MAP_SHARED)) &&
5293 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5294 return -EINVAL;
5295 return addr;
5296 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5297 if (filp || (flags & MAP_SHARED))
5298 do_color_align = 1;
5299
5300 +#ifdef CONFIG_PAX_RANDMMAP
5301 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5302 +#endif
5303 +
5304 if (addr) {
5305 if (do_color_align)
5306 addr = COLOUR_ALIGN(addr, pgoff);
5307 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5308 addr = PAGE_ALIGN(addr);
5309
5310 vma = find_vma(mm, addr);
5311 - if (task_size - len >= addr &&
5312 - (!vma || addr + len <= vma->vm_start))
5313 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5314 return addr;
5315 }
5316
5317 if (len > mm->cached_hole_size) {
5318 - start_addr = addr = mm->free_area_cache;
5319 + start_addr = addr = mm->free_area_cache;
5320 } else {
5321 - start_addr = addr = TASK_UNMAPPED_BASE;
5322 + start_addr = addr = mm->mmap_base;
5323 mm->cached_hole_size = 0;
5324 }
5325
5326 @@ -175,14 +178,14 @@ full_search:
5327 vma = find_vma(mm, VA_EXCLUDE_END);
5328 }
5329 if (unlikely(task_size < addr)) {
5330 - if (start_addr != TASK_UNMAPPED_BASE) {
5331 - start_addr = addr = TASK_UNMAPPED_BASE;
5332 + if (start_addr != mm->mmap_base) {
5333 + start_addr = addr = mm->mmap_base;
5334 mm->cached_hole_size = 0;
5335 goto full_search;
5336 }
5337 return -ENOMEM;
5338 }
5339 - if (likely(!vma || addr + len <= vma->vm_start)) {
5340 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5341 /*
5342 * Remember the place where we stopped the search:
5343 */
5344 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5345 /* We do not accept a shared mapping if it would violate
5346 * cache aliasing constraints.
5347 */
5348 - if ((flags & MAP_SHARED) &&
5349 + if ((filp || (flags & MAP_SHARED)) &&
5350 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5351 return -EINVAL;
5352 return addr;
5353 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5354 addr = PAGE_ALIGN(addr);
5355
5356 vma = find_vma(mm, addr);
5357 - if (task_size - len >= addr &&
5358 - (!vma || addr + len <= vma->vm_start))
5359 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5360 return addr;
5361 }
5362
5363 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5364 /* make sure it can fit in the remaining address space */
5365 if (likely(addr > len)) {
5366 vma = find_vma(mm, addr-len);
5367 - if (!vma || addr <= vma->vm_start) {
5368 + if (check_heap_stack_gap(vma, addr - len, len)) {
5369 /* remember the address as a hint for next time */
5370 return (mm->free_area_cache = addr-len);
5371 }
5372 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5373 if (unlikely(mm->mmap_base < len))
5374 goto bottomup;
5375
5376 - addr = mm->mmap_base-len;
5377 - if (do_color_align)
5378 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5379 + addr = mm->mmap_base - len;
5380
5381 do {
5382 + if (do_color_align)
5383 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5384 /*
5385 * Lookup failure means no vma is above this address,
5386 * else if new region fits below vma->vm_start,
5387 * return with success:
5388 */
5389 vma = find_vma(mm, addr);
5390 - if (likely(!vma || addr+len <= vma->vm_start)) {
5391 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5392 /* remember the address as a hint for next time */
5393 return (mm->free_area_cache = addr);
5394 }
5395 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5396 mm->cached_hole_size = vma->vm_start - addr;
5397
5398 /* try just below the current vma->vm_start */
5399 - addr = vma->vm_start-len;
5400 - if (do_color_align)
5401 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5402 - } while (likely(len < vma->vm_start));
5403 + addr = skip_heap_stack_gap(vma, len);
5404 + } while (!IS_ERR_VALUE(addr));
5405
5406 bottomup:
5407 /*
5408 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5409 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5410 sysctl_legacy_va_layout) {
5411 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5412 +
5413 +#ifdef CONFIG_PAX_RANDMMAP
5414 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5415 + mm->mmap_base += mm->delta_mmap;
5416 +#endif
5417 +
5418 mm->get_unmapped_area = arch_get_unmapped_area;
5419 mm->unmap_area = arch_unmap_area;
5420 } else {
5421 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5422 gap = (task_size / 6 * 5);
5423
5424 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5425 +
5426 +#ifdef CONFIG_PAX_RANDMMAP
5427 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5428 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5429 +#endif
5430 +
5431 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5432 mm->unmap_area = arch_unmap_area_topdown;
5433 }
5434 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5435 index c0490c7..84959d1 100644
5436 --- a/arch/sparc/kernel/traps_32.c
5437 +++ b/arch/sparc/kernel/traps_32.c
5438 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5439 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5440 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5441
5442 +extern void gr_handle_kernel_exploit(void);
5443 +
5444 void die_if_kernel(char *str, struct pt_regs *regs)
5445 {
5446 static int die_counter;
5447 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5448 count++ < 30 &&
5449 (((unsigned long) rw) >= PAGE_OFFSET) &&
5450 !(((unsigned long) rw) & 0x7)) {
5451 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5452 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5453 (void *) rw->ins[7]);
5454 rw = (struct reg_window32 *)rw->ins[6];
5455 }
5456 }
5457 printk("Instruction DUMP:");
5458 instruction_dump ((unsigned long *) regs->pc);
5459 - if(regs->psr & PSR_PS)
5460 + if(regs->psr & PSR_PS) {
5461 + gr_handle_kernel_exploit();
5462 do_exit(SIGKILL);
5463 + }
5464 do_exit(SIGSEGV);
5465 }
5466
5467 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5468 index 10f7bb9..cdb6793 100644
5469 --- a/arch/sparc/kernel/traps_64.c
5470 +++ b/arch/sparc/kernel/traps_64.c
5471 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5472 i + 1,
5473 p->trapstack[i].tstate, p->trapstack[i].tpc,
5474 p->trapstack[i].tnpc, p->trapstack[i].tt);
5475 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5476 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5477 }
5478 }
5479
5480 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5481
5482 lvl -= 0x100;
5483 if (regs->tstate & TSTATE_PRIV) {
5484 +
5485 +#ifdef CONFIG_PAX_REFCOUNT
5486 + if (lvl == 6)
5487 + pax_report_refcount_overflow(regs);
5488 +#endif
5489 +
5490 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5491 die_if_kernel(buffer, regs);
5492 }
5493 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5494 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5495 {
5496 char buffer[32];
5497 -
5498 +
5499 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5500 0, lvl, SIGTRAP) == NOTIFY_STOP)
5501 return;
5502
5503 +#ifdef CONFIG_PAX_REFCOUNT
5504 + if (lvl == 6)
5505 + pax_report_refcount_overflow(regs);
5506 +#endif
5507 +
5508 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5509
5510 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5511 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5512 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5513 printk("%s" "ERROR(%d): ",
5514 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5515 - printk("TPC<%pS>\n", (void *) regs->tpc);
5516 + printk("TPC<%pA>\n", (void *) regs->tpc);
5517 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5518 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5519 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5520 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5521 smp_processor_id(),
5522 (type & 0x1) ? 'I' : 'D',
5523 regs->tpc);
5524 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5525 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5526 panic("Irrecoverable Cheetah+ parity error.");
5527 }
5528
5529 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5530 smp_processor_id(),
5531 (type & 0x1) ? 'I' : 'D',
5532 regs->tpc);
5533 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5534 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5535 }
5536
5537 struct sun4v_error_entry {
5538 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5539
5540 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5541 regs->tpc, tl);
5542 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5543 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5544 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5545 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5546 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5547 (void *) regs->u_regs[UREG_I7]);
5548 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5549 "pte[%lx] error[%lx]\n",
5550 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5551
5552 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5553 regs->tpc, tl);
5554 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5555 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5556 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5557 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5558 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5559 (void *) regs->u_regs[UREG_I7]);
5560 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5561 "pte[%lx] error[%lx]\n",
5562 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5563 fp = (unsigned long)sf->fp + STACK_BIAS;
5564 }
5565
5566 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5567 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5568 } while (++count < 16);
5569 }
5570
5571 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5572 return (struct reg_window *) (fp + STACK_BIAS);
5573 }
5574
5575 +extern void gr_handle_kernel_exploit(void);
5576 +
5577 void die_if_kernel(char *str, struct pt_regs *regs)
5578 {
5579 static int die_counter;
5580 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5581 while (rw &&
5582 count++ < 30&&
5583 is_kernel_stack(current, rw)) {
5584 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5585 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5586 (void *) rw->ins[7]);
5587
5588 rw = kernel_stack_up(rw);
5589 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5590 }
5591 user_instruction_dump ((unsigned int __user *) regs->tpc);
5592 }
5593 - if (regs->tstate & TSTATE_PRIV)
5594 + if (regs->tstate & TSTATE_PRIV) {
5595 + gr_handle_kernel_exploit();
5596 do_exit(SIGKILL);
5597 + }
5598 +
5599 do_exit(SIGSEGV);
5600 }
5601 EXPORT_SYMBOL(die_if_kernel);
5602 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5603 index be183fe..1c8d332 100644
5604 --- a/arch/sparc/kernel/una_asm_64.S
5605 +++ b/arch/sparc/kernel/una_asm_64.S
5606 @@ -127,7 +127,7 @@ do_int_load:
5607 wr %o5, 0x0, %asi
5608 retl
5609 mov 0, %o0
5610 - .size __do_int_load, .-__do_int_load
5611 + .size do_int_load, .-do_int_load
5612
5613 .section __ex_table,"a"
5614 .word 4b, __retl_efault
5615 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5616 index 3792099..2af17d8 100644
5617 --- a/arch/sparc/kernel/unaligned_64.c
5618 +++ b/arch/sparc/kernel/unaligned_64.c
5619 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5620 if (count < 5) {
5621 last_time = jiffies;
5622 count++;
5623 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5624 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5625 regs->tpc, (void *) regs->tpc);
5626 }
5627 }
5628 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5629 index e75faf0..24f12f9 100644
5630 --- a/arch/sparc/lib/Makefile
5631 +++ b/arch/sparc/lib/Makefile
5632 @@ -2,7 +2,7 @@
5633 #
5634
5635 asflags-y := -ansi -DST_DIV0=0x02
5636 -ccflags-y := -Werror
5637 +#ccflags-y := -Werror
5638
5639 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5640 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5641 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5642 index 0268210..f0291ca 100644
5643 --- a/arch/sparc/lib/atomic_64.S
5644 +++ b/arch/sparc/lib/atomic_64.S
5645 @@ -18,7 +18,12 @@
5646 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5647 BACKOFF_SETUP(%o2)
5648 1: lduw [%o1], %g1
5649 - add %g1, %o0, %g7
5650 + addcc %g1, %o0, %g7
5651 +
5652 +#ifdef CONFIG_PAX_REFCOUNT
5653 + tvs %icc, 6
5654 +#endif
5655 +
5656 cas [%o1], %g1, %g7
5657 cmp %g1, %g7
5658 bne,pn %icc, 2f
5659 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5660 2: BACKOFF_SPIN(%o2, %o3, 1b)
5661 .size atomic_add, .-atomic_add
5662
5663 + .globl atomic_add_unchecked
5664 + .type atomic_add_unchecked,#function
5665 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5666 + BACKOFF_SETUP(%o2)
5667 +1: lduw [%o1], %g1
5668 + add %g1, %o0, %g7
5669 + cas [%o1], %g1, %g7
5670 + cmp %g1, %g7
5671 + bne,pn %icc, 2f
5672 + nop
5673 + retl
5674 + nop
5675 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5676 + .size atomic_add_unchecked, .-atomic_add_unchecked
5677 +
5678 .globl atomic_sub
5679 .type atomic_sub,#function
5680 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683 - sub %g1, %o0, %g7
5684 + subcc %g1, %o0, %g7
5685 +
5686 +#ifdef CONFIG_PAX_REFCOUNT
5687 + tvs %icc, 6
5688 +#endif
5689 +
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, 2f
5693 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_sub, .-atomic_sub
5696
5697 + .globl atomic_sub_unchecked
5698 + .type atomic_sub_unchecked,#function
5699 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5700 + BACKOFF_SETUP(%o2)
5701 +1: lduw [%o1], %g1
5702 + sub %g1, %o0, %g7
5703 + cas [%o1], %g1, %g7
5704 + cmp %g1, %g7
5705 + bne,pn %icc, 2f
5706 + nop
5707 + retl
5708 + nop
5709 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5710 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5711 +
5712 .globl atomic_add_ret
5713 .type atomic_add_ret,#function
5714 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717 - add %g1, %o0, %g7
5718 + addcc %g1, %o0, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, 2f
5727 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_add_ret, .-atomic_add_ret
5730
5731 + .globl atomic_add_ret_unchecked
5732 + .type atomic_add_ret_unchecked,#function
5733 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5734 + BACKOFF_SETUP(%o2)
5735 +1: lduw [%o1], %g1
5736 + addcc %g1, %o0, %g7
5737 + cas [%o1], %g1, %g7
5738 + cmp %g1, %g7
5739 + bne,pn %icc, 2f
5740 + add %g7, %o0, %g7
5741 + sra %g7, 0, %o0
5742 + retl
5743 + nop
5744 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5745 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5746 +
5747 .globl atomic_sub_ret
5748 .type atomic_sub_ret,#function
5749 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5750 BACKOFF_SETUP(%o2)
5751 1: lduw [%o1], %g1
5752 - sub %g1, %o0, %g7
5753 + subcc %g1, %o0, %g7
5754 +
5755 +#ifdef CONFIG_PAX_REFCOUNT
5756 + tvs %icc, 6
5757 +#endif
5758 +
5759 cas [%o1], %g1, %g7
5760 cmp %g1, %g7
5761 bne,pn %icc, 2f
5762 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5763 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5764 BACKOFF_SETUP(%o2)
5765 1: ldx [%o1], %g1
5766 - add %g1, %o0, %g7
5767 + addcc %g1, %o0, %g7
5768 +
5769 +#ifdef CONFIG_PAX_REFCOUNT
5770 + tvs %xcc, 6
5771 +#endif
5772 +
5773 casx [%o1], %g1, %g7
5774 cmp %g1, %g7
5775 bne,pn %xcc, 2f
5776 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5777 2: BACKOFF_SPIN(%o2, %o3, 1b)
5778 .size atomic64_add, .-atomic64_add
5779
5780 + .globl atomic64_add_unchecked
5781 + .type atomic64_add_unchecked,#function
5782 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5783 + BACKOFF_SETUP(%o2)
5784 +1: ldx [%o1], %g1
5785 + addcc %g1, %o0, %g7
5786 + casx [%o1], %g1, %g7
5787 + cmp %g1, %g7
5788 + bne,pn %xcc, 2f
5789 + nop
5790 + retl
5791 + nop
5792 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5793 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5794 +
5795 .globl atomic64_sub
5796 .type atomic64_sub,#function
5797 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800 - sub %g1, %o0, %g7
5801 + subcc %g1, %o0, %g7
5802 +
5803 +#ifdef CONFIG_PAX_REFCOUNT
5804 + tvs %xcc, 6
5805 +#endif
5806 +
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, 2f
5810 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_sub, .-atomic64_sub
5813
5814 + .globl atomic64_sub_unchecked
5815 + .type atomic64_sub_unchecked,#function
5816 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5817 + BACKOFF_SETUP(%o2)
5818 +1: ldx [%o1], %g1
5819 + subcc %g1, %o0, %g7
5820 + casx [%o1], %g1, %g7
5821 + cmp %g1, %g7
5822 + bne,pn %xcc, 2f
5823 + nop
5824 + retl
5825 + nop
5826 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5827 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5828 +
5829 .globl atomic64_add_ret
5830 .type atomic64_add_ret,#function
5831 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834 - add %g1, %o0, %g7
5835 + addcc %g1, %o0, %g7
5836 +
5837 +#ifdef CONFIG_PAX_REFCOUNT
5838 + tvs %xcc, 6
5839 +#endif
5840 +
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, 2f
5844 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_add_ret, .-atomic64_add_ret
5847
5848 + .globl atomic64_add_ret_unchecked
5849 + .type atomic64_add_ret_unchecked,#function
5850 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5851 + BACKOFF_SETUP(%o2)
5852 +1: ldx [%o1], %g1
5853 + addcc %g1, %o0, %g7
5854 + casx [%o1], %g1, %g7
5855 + cmp %g1, %g7
5856 + bne,pn %xcc, 2f
5857 + add %g7, %o0, %g7
5858 + mov %g7, %o0
5859 + retl
5860 + nop
5861 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5862 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5863 +
5864 .globl atomic64_sub_ret
5865 .type atomic64_sub_ret,#function
5866 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5867 BACKOFF_SETUP(%o2)
5868 1: ldx [%o1], %g1
5869 - sub %g1, %o0, %g7
5870 + subcc %g1, %o0, %g7
5871 +
5872 +#ifdef CONFIG_PAX_REFCOUNT
5873 + tvs %xcc, 6
5874 +#endif
5875 +
5876 casx [%o1], %g1, %g7
5877 cmp %g1, %g7
5878 bne,pn %xcc, 2f
5879 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5880 index 704b126..2e79d76 100644
5881 --- a/arch/sparc/lib/ksyms.c
5882 +++ b/arch/sparc/lib/ksyms.c
5883 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5884
5885 /* Atomic counter implementation. */
5886 EXPORT_SYMBOL(atomic_add);
5887 +EXPORT_SYMBOL(atomic_add_unchecked);
5888 EXPORT_SYMBOL(atomic_add_ret);
5889 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5890 EXPORT_SYMBOL(atomic_sub);
5891 +EXPORT_SYMBOL(atomic_sub_unchecked);
5892 EXPORT_SYMBOL(atomic_sub_ret);
5893 EXPORT_SYMBOL(atomic64_add);
5894 +EXPORT_SYMBOL(atomic64_add_unchecked);
5895 EXPORT_SYMBOL(atomic64_add_ret);
5896 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5897 EXPORT_SYMBOL(atomic64_sub);
5898 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5899 EXPORT_SYMBOL(atomic64_sub_ret);
5900
5901 /* Atomic bit operations. */
5902 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5903 index 91a7d29..ce75c29 100644
5904 --- a/arch/sparc/lib/rwsem_64.S
5905 +++ b/arch/sparc/lib/rwsem_64.S
5906 @@ -11,7 +11,12 @@
5907 .globl __down_read
5908 __down_read:
5909 1: lduw [%o0], %g1
5910 - add %g1, 1, %g7
5911 + addcc %g1, 1, %g7
5912 +
5913 +#ifdef CONFIG_PAX_REFCOUNT
5914 + tvs %icc, 6
5915 +#endif
5916 +
5917 cas [%o0], %g1, %g7
5918 cmp %g1, %g7
5919 bne,pn %icc, 1b
5920 @@ -33,7 +38,12 @@ __down_read:
5921 .globl __down_read_trylock
5922 __down_read_trylock:
5923 1: lduw [%o0], %g1
5924 - add %g1, 1, %g7
5925 + addcc %g1, 1, %g7
5926 +
5927 +#ifdef CONFIG_PAX_REFCOUNT
5928 + tvs %icc, 6
5929 +#endif
5930 +
5931 cmp %g7, 0
5932 bl,pn %icc, 2f
5933 mov 0, %o1
5934 @@ -51,7 +61,12 @@ __down_write:
5935 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5936 1:
5937 lduw [%o0], %g3
5938 - add %g3, %g1, %g7
5939 + addcc %g3, %g1, %g7
5940 +
5941 +#ifdef CONFIG_PAX_REFCOUNT
5942 + tvs %icc, 6
5943 +#endif
5944 +
5945 cas [%o0], %g3, %g7
5946 cmp %g3, %g7
5947 bne,pn %icc, 1b
5948 @@ -77,7 +92,12 @@ __down_write_trylock:
5949 cmp %g3, 0
5950 bne,pn %icc, 2f
5951 mov 0, %o1
5952 - add %g3, %g1, %g7
5953 + addcc %g3, %g1, %g7
5954 +
5955 +#ifdef CONFIG_PAX_REFCOUNT
5956 + tvs %icc, 6
5957 +#endif
5958 +
5959 cas [%o0], %g3, %g7
5960 cmp %g3, %g7
5961 bne,pn %icc, 1b
5962 @@ -90,7 +110,12 @@ __down_write_trylock:
5963 __up_read:
5964 1:
5965 lduw [%o0], %g1
5966 - sub %g1, 1, %g7
5967 + subcc %g1, 1, %g7
5968 +
5969 +#ifdef CONFIG_PAX_REFCOUNT
5970 + tvs %icc, 6
5971 +#endif
5972 +
5973 cas [%o0], %g1, %g7
5974 cmp %g1, %g7
5975 bne,pn %icc, 1b
5976 @@ -118,7 +143,12 @@ __up_write:
5977 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5978 1:
5979 lduw [%o0], %g3
5980 - sub %g3, %g1, %g7
5981 + subcc %g3, %g1, %g7
5982 +
5983 +#ifdef CONFIG_PAX_REFCOUNT
5984 + tvs %icc, 6
5985 +#endif
5986 +
5987 cas [%o0], %g3, %g7
5988 cmp %g3, %g7
5989 bne,pn %icc, 1b
5990 @@ -143,7 +173,12 @@ __downgrade_write:
5991 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5992 1:
5993 lduw [%o0], %g3
5994 - sub %g3, %g1, %g7
5995 + subcc %g3, %g1, %g7
5996 +
5997 +#ifdef CONFIG_PAX_REFCOUNT
5998 + tvs %icc, 6
5999 +#endif
6000 +
6001 cas [%o0], %g3, %g7
6002 cmp %g3, %g7
6003 bne,pn %icc, 1b
6004 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6005 index 79836a7..62f47a2 100644
6006 --- a/arch/sparc/mm/Makefile
6007 +++ b/arch/sparc/mm/Makefile
6008 @@ -2,7 +2,7 @@
6009 #
6010
6011 asflags-y := -ansi
6012 -ccflags-y := -Werror
6013 +#ccflags-y := -Werror
6014
6015 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6016 obj-y += fault_$(BITS).o
6017 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6018 index b99f81c..3453e93 100644
6019 --- a/arch/sparc/mm/fault_32.c
6020 +++ b/arch/sparc/mm/fault_32.c
6021 @@ -21,6 +21,9 @@
6022 #include <linux/interrupt.h>
6023 #include <linux/module.h>
6024 #include <linux/kdebug.h>
6025 +#include <linux/slab.h>
6026 +#include <linux/pagemap.h>
6027 +#include <linux/compiler.h>
6028
6029 #include <asm/system.h>
6030 #include <asm/page.h>
6031 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6032 return safe_compute_effective_address(regs, insn);
6033 }
6034
6035 +#ifdef CONFIG_PAX_PAGEEXEC
6036 +#ifdef CONFIG_PAX_DLRESOLVE
6037 +static void pax_emuplt_close(struct vm_area_struct *vma)
6038 +{
6039 + vma->vm_mm->call_dl_resolve = 0UL;
6040 +}
6041 +
6042 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6043 +{
6044 + unsigned int *kaddr;
6045 +
6046 + vmf->page = alloc_page(GFP_HIGHUSER);
6047 + if (!vmf->page)
6048 + return VM_FAULT_OOM;
6049 +
6050 + kaddr = kmap(vmf->page);
6051 + memset(kaddr, 0, PAGE_SIZE);
6052 + kaddr[0] = 0x9DE3BFA8U; /* save */
6053 + flush_dcache_page(vmf->page);
6054 + kunmap(vmf->page);
6055 + return VM_FAULT_MAJOR;
6056 +}
6057 +
6058 +static const struct vm_operations_struct pax_vm_ops = {
6059 + .close = pax_emuplt_close,
6060 + .fault = pax_emuplt_fault
6061 +};
6062 +
6063 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6064 +{
6065 + int ret;
6066 +
6067 + vma->vm_mm = current->mm;
6068 + vma->vm_start = addr;
6069 + vma->vm_end = addr + PAGE_SIZE;
6070 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6071 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6072 + vma->vm_ops = &pax_vm_ops;
6073 +
6074 + ret = insert_vm_struct(current->mm, vma);
6075 + if (ret)
6076 + return ret;
6077 +
6078 + ++current->mm->total_vm;
6079 + return 0;
6080 +}
6081 +#endif
6082 +
6083 +/*
6084 + * PaX: decide what to do with offenders (regs->pc = fault address)
6085 + *
6086 + * returns 1 when task should be killed
6087 + * 2 when patched PLT trampoline was detected
6088 + * 3 when unpatched PLT trampoline was detected
6089 + */
6090 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6091 +{
6092 +
6093 +#ifdef CONFIG_PAX_EMUPLT
6094 + int err;
6095 +
6096 + do { /* PaX: patched PLT emulation #1 */
6097 + unsigned int sethi1, sethi2, jmpl;
6098 +
6099 + err = get_user(sethi1, (unsigned int *)regs->pc);
6100 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6101 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6102 +
6103 + if (err)
6104 + break;
6105 +
6106 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6107 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6108 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6109 + {
6110 + unsigned int addr;
6111 +
6112 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6113 + addr = regs->u_regs[UREG_G1];
6114 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6115 + regs->pc = addr;
6116 + regs->npc = addr+4;
6117 + return 2;
6118 + }
6119 + } while (0);
6120 +
6121 + { /* PaX: patched PLT emulation #2 */
6122 + unsigned int ba;
6123 +
6124 + err = get_user(ba, (unsigned int *)regs->pc);
6125 +
6126 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6127 + unsigned int addr;
6128 +
6129 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6130 + regs->pc = addr;
6131 + regs->npc = addr+4;
6132 + return 2;
6133 + }
6134 + }
6135 +
6136 + do { /* PaX: patched PLT emulation #3 */
6137 + unsigned int sethi, jmpl, nop;
6138 +
6139 + err = get_user(sethi, (unsigned int *)regs->pc);
6140 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6141 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6142 +
6143 + if (err)
6144 + break;
6145 +
6146 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6147 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6148 + nop == 0x01000000U)
6149 + {
6150 + unsigned int addr;
6151 +
6152 + addr = (sethi & 0x003FFFFFU) << 10;
6153 + regs->u_regs[UREG_G1] = addr;
6154 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6155 + regs->pc = addr;
6156 + regs->npc = addr+4;
6157 + return 2;
6158 + }
6159 + } while (0);
6160 +
6161 + do { /* PaX: unpatched PLT emulation step 1 */
6162 + unsigned int sethi, ba, nop;
6163 +
6164 + err = get_user(sethi, (unsigned int *)regs->pc);
6165 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6166 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6167 +
6168 + if (err)
6169 + break;
6170 +
6171 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6172 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6173 + nop == 0x01000000U)
6174 + {
6175 + unsigned int addr, save, call;
6176 +
6177 + if ((ba & 0xFFC00000U) == 0x30800000U)
6178 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6179 + else
6180 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6181 +
6182 + err = get_user(save, (unsigned int *)addr);
6183 + err |= get_user(call, (unsigned int *)(addr+4));
6184 + err |= get_user(nop, (unsigned int *)(addr+8));
6185 + if (err)
6186 + break;
6187 +
6188 +#ifdef CONFIG_PAX_DLRESOLVE
6189 + if (save == 0x9DE3BFA8U &&
6190 + (call & 0xC0000000U) == 0x40000000U &&
6191 + nop == 0x01000000U)
6192 + {
6193 + struct vm_area_struct *vma;
6194 + unsigned long call_dl_resolve;
6195 +
6196 + down_read(&current->mm->mmap_sem);
6197 + call_dl_resolve = current->mm->call_dl_resolve;
6198 + up_read(&current->mm->mmap_sem);
6199 + if (likely(call_dl_resolve))
6200 + goto emulate;
6201 +
6202 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6203 +
6204 + down_write(&current->mm->mmap_sem);
6205 + if (current->mm->call_dl_resolve) {
6206 + call_dl_resolve = current->mm->call_dl_resolve;
6207 + up_write(&current->mm->mmap_sem);
6208 + if (vma)
6209 + kmem_cache_free(vm_area_cachep, vma);
6210 + goto emulate;
6211 + }
6212 +
6213 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6214 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6215 + up_write(&current->mm->mmap_sem);
6216 + if (vma)
6217 + kmem_cache_free(vm_area_cachep, vma);
6218 + return 1;
6219 + }
6220 +
6221 + if (pax_insert_vma(vma, call_dl_resolve)) {
6222 + up_write(&current->mm->mmap_sem);
6223 + kmem_cache_free(vm_area_cachep, vma);
6224 + return 1;
6225 + }
6226 +
6227 + current->mm->call_dl_resolve = call_dl_resolve;
6228 + up_write(&current->mm->mmap_sem);
6229 +
6230 +emulate:
6231 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6232 + regs->pc = call_dl_resolve;
6233 + regs->npc = addr+4;
6234 + return 3;
6235 + }
6236 +#endif
6237 +
6238 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6239 + if ((save & 0xFFC00000U) == 0x05000000U &&
6240 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6241 + nop == 0x01000000U)
6242 + {
6243 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6244 + regs->u_regs[UREG_G2] = addr + 4;
6245 + addr = (save & 0x003FFFFFU) << 10;
6246 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6247 + regs->pc = addr;
6248 + regs->npc = addr+4;
6249 + return 3;
6250 + }
6251 + }
6252 + } while (0);
6253 +
6254 + do { /* PaX: unpatched PLT emulation step 2 */
6255 + unsigned int save, call, nop;
6256 +
6257 + err = get_user(save, (unsigned int *)(regs->pc-4));
6258 + err |= get_user(call, (unsigned int *)regs->pc);
6259 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6260 + if (err)
6261 + break;
6262 +
6263 + if (save == 0x9DE3BFA8U &&
6264 + (call & 0xC0000000U) == 0x40000000U &&
6265 + nop == 0x01000000U)
6266 + {
6267 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6268 +
6269 + regs->u_regs[UREG_RETPC] = regs->pc;
6270 + regs->pc = dl_resolve;
6271 + regs->npc = dl_resolve+4;
6272 + return 3;
6273 + }
6274 + } while (0);
6275 +#endif
6276 +
6277 + return 1;
6278 +}
6279 +
6280 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6281 +{
6282 + unsigned long i;
6283 +
6284 + printk(KERN_ERR "PAX: bytes at PC: ");
6285 + for (i = 0; i < 8; i++) {
6286 + unsigned int c;
6287 + if (get_user(c, (unsigned int *)pc+i))
6288 + printk(KERN_CONT "???????? ");
6289 + else
6290 + printk(KERN_CONT "%08x ", c);
6291 + }
6292 + printk("\n");
6293 +}
6294 +#endif
6295 +
6296 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6297 unsigned long address)
6298 {
6299 @@ -231,6 +495,24 @@ good_area:
6300 if(!(vma->vm_flags & VM_WRITE))
6301 goto bad_area;
6302 } else {
6303 +
6304 +#ifdef CONFIG_PAX_PAGEEXEC
6305 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6306 + up_read(&mm->mmap_sem);
6307 + switch (pax_handle_fetch_fault(regs)) {
6308 +
6309 +#ifdef CONFIG_PAX_EMUPLT
6310 + case 2:
6311 + case 3:
6312 + return;
6313 +#endif
6314 +
6315 + }
6316 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6317 + do_group_exit(SIGKILL);
6318 + }
6319 +#endif
6320 +
6321 /* Allow reads even for write-only mappings */
6322 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6323 goto bad_area;
6324 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6325 index 43b0da9..a0b78f9 100644
6326 --- a/arch/sparc/mm/fault_64.c
6327 +++ b/arch/sparc/mm/fault_64.c
6328 @@ -20,6 +20,9 @@
6329 #include <linux/kprobes.h>
6330 #include <linux/kdebug.h>
6331 #include <linux/percpu.h>
6332 +#include <linux/slab.h>
6333 +#include <linux/pagemap.h>
6334 +#include <linux/compiler.h>
6335
6336 #include <asm/page.h>
6337 #include <asm/pgtable.h>
6338 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6339 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6340 regs->tpc);
6341 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6342 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6343 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6344 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6345 dump_stack();
6346 unhandled_fault(regs->tpc, current, regs);
6347 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6348 show_regs(regs);
6349 }
6350
6351 +#ifdef CONFIG_PAX_PAGEEXEC
6352 +#ifdef CONFIG_PAX_DLRESOLVE
6353 +static void pax_emuplt_close(struct vm_area_struct *vma)
6354 +{
6355 + vma->vm_mm->call_dl_resolve = 0UL;
6356 +}
6357 +
6358 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6359 +{
6360 + unsigned int *kaddr;
6361 +
6362 + vmf->page = alloc_page(GFP_HIGHUSER);
6363 + if (!vmf->page)
6364 + return VM_FAULT_OOM;
6365 +
6366 + kaddr = kmap(vmf->page);
6367 + memset(kaddr, 0, PAGE_SIZE);
6368 + kaddr[0] = 0x9DE3BFA8U; /* save */
6369 + flush_dcache_page(vmf->page);
6370 + kunmap(vmf->page);
6371 + return VM_FAULT_MAJOR;
6372 +}
6373 +
6374 +static const struct vm_operations_struct pax_vm_ops = {
6375 + .close = pax_emuplt_close,
6376 + .fault = pax_emuplt_fault
6377 +};
6378 +
6379 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6380 +{
6381 + int ret;
6382 +
6383 + vma->vm_mm = current->mm;
6384 + vma->vm_start = addr;
6385 + vma->vm_end = addr + PAGE_SIZE;
6386 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6387 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6388 + vma->vm_ops = &pax_vm_ops;
6389 +
6390 + ret = insert_vm_struct(current->mm, vma);
6391 + if (ret)
6392 + return ret;
6393 +
6394 + ++current->mm->total_vm;
6395 + return 0;
6396 +}
6397 +#endif
6398 +
6399 +/*
6400 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6401 + *
6402 + * returns 1 when task should be killed
6403 + * 2 when patched PLT trampoline was detected
6404 + * 3 when unpatched PLT trampoline was detected
6405 + */
6406 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6407 +{
6408 +
6409 +#ifdef CONFIG_PAX_EMUPLT
6410 + int err;
6411 +
6412 + do { /* PaX: patched PLT emulation #1 */
6413 + unsigned int sethi1, sethi2, jmpl;
6414 +
6415 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6416 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6417 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6418 +
6419 + if (err)
6420 + break;
6421 +
6422 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6423 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6424 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6425 + {
6426 + unsigned long addr;
6427 +
6428 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6429 + addr = regs->u_regs[UREG_G1];
6430 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6431 +
6432 + if (test_thread_flag(TIF_32BIT))
6433 + addr &= 0xFFFFFFFFUL;
6434 +
6435 + regs->tpc = addr;
6436 + regs->tnpc = addr+4;
6437 + return 2;
6438 + }
6439 + } while (0);
6440 +
6441 + { /* PaX: patched PLT emulation #2 */
6442 + unsigned int ba;
6443 +
6444 + err = get_user(ba, (unsigned int *)regs->tpc);
6445 +
6446 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6447 + unsigned long addr;
6448 +
6449 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6450 +
6451 + if (test_thread_flag(TIF_32BIT))
6452 + addr &= 0xFFFFFFFFUL;
6453 +
6454 + regs->tpc = addr;
6455 + regs->tnpc = addr+4;
6456 + return 2;
6457 + }
6458 + }
6459 +
6460 + do { /* PaX: patched PLT emulation #3 */
6461 + unsigned int sethi, jmpl, nop;
6462 +
6463 + err = get_user(sethi, (unsigned int *)regs->tpc);
6464 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6465 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6466 +
6467 + if (err)
6468 + break;
6469 +
6470 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6471 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6472 + nop == 0x01000000U)
6473 + {
6474 + unsigned long addr;
6475 +
6476 + addr = (sethi & 0x003FFFFFU) << 10;
6477 + regs->u_regs[UREG_G1] = addr;
6478 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6479 +
6480 + if (test_thread_flag(TIF_32BIT))
6481 + addr &= 0xFFFFFFFFUL;
6482 +
6483 + regs->tpc = addr;
6484 + regs->tnpc = addr+4;
6485 + return 2;
6486 + }
6487 + } while (0);
6488 +
6489 + do { /* PaX: patched PLT emulation #4 */
6490 + unsigned int sethi, mov1, call, mov2;
6491 +
6492 + err = get_user(sethi, (unsigned int *)regs->tpc);
6493 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6494 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6495 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6496 +
6497 + if (err)
6498 + break;
6499 +
6500 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6501 + mov1 == 0x8210000FU &&
6502 + (call & 0xC0000000U) == 0x40000000U &&
6503 + mov2 == 0x9E100001U)
6504 + {
6505 + unsigned long addr;
6506 +
6507 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6508 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6509 +
6510 + if (test_thread_flag(TIF_32BIT))
6511 + addr &= 0xFFFFFFFFUL;
6512 +
6513 + regs->tpc = addr;
6514 + regs->tnpc = addr+4;
6515 + return 2;
6516 + }
6517 + } while (0);
6518 +
6519 + do { /* PaX: patched PLT emulation #5 */
6520 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6521 +
6522 + err = get_user(sethi, (unsigned int *)regs->tpc);
6523 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6524 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6525 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6526 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6527 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6528 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6529 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6530 +
6531 + if (err)
6532 + break;
6533 +
6534 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6535 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6536 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6537 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6538 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6539 + sllx == 0x83287020U &&
6540 + jmpl == 0x81C04005U &&
6541 + nop == 0x01000000U)
6542 + {
6543 + unsigned long addr;
6544 +
6545 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6546 + regs->u_regs[UREG_G1] <<= 32;
6547 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6548 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6549 + regs->tpc = addr;
6550 + regs->tnpc = addr+4;
6551 + return 2;
6552 + }
6553 + } while (0);
6554 +
6555 + do { /* PaX: patched PLT emulation #6 */
6556 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6557 +
6558 + err = get_user(sethi, (unsigned int *)regs->tpc);
6559 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6560 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6561 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6562 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6563 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6564 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6565 +
6566 + if (err)
6567 + break;
6568 +
6569 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6570 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6571 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6572 + sllx == 0x83287020U &&
6573 + (or & 0xFFFFE000U) == 0x8A116000U &&
6574 + jmpl == 0x81C04005U &&
6575 + nop == 0x01000000U)
6576 + {
6577 + unsigned long addr;
6578 +
6579 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6580 + regs->u_regs[UREG_G1] <<= 32;
6581 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6582 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6583 + regs->tpc = addr;
6584 + regs->tnpc = addr+4;
6585 + return 2;
6586 + }
6587 + } while (0);
6588 +
6589 + do { /* PaX: unpatched PLT emulation step 1 */
6590 + unsigned int sethi, ba, nop;
6591 +
6592 + err = get_user(sethi, (unsigned int *)regs->tpc);
6593 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6594 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6595 +
6596 + if (err)
6597 + break;
6598 +
6599 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6600 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6601 + nop == 0x01000000U)
6602 + {
6603 + unsigned long addr;
6604 + unsigned int save, call;
6605 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6606 +
6607 + if ((ba & 0xFFC00000U) == 0x30800000U)
6608 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6609 + else
6610 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6611 +
6612 + if (test_thread_flag(TIF_32BIT))
6613 + addr &= 0xFFFFFFFFUL;
6614 +
6615 + err = get_user(save, (unsigned int *)addr);
6616 + err |= get_user(call, (unsigned int *)(addr+4));
6617 + err |= get_user(nop, (unsigned int *)(addr+8));
6618 + if (err)
6619 + break;
6620 +
6621 +#ifdef CONFIG_PAX_DLRESOLVE
6622 + if (save == 0x9DE3BFA8U &&
6623 + (call & 0xC0000000U) == 0x40000000U &&
6624 + nop == 0x01000000U)
6625 + {
6626 + struct vm_area_struct *vma;
6627 + unsigned long call_dl_resolve;
6628 +
6629 + down_read(&current->mm->mmap_sem);
6630 + call_dl_resolve = current->mm->call_dl_resolve;
6631 + up_read(&current->mm->mmap_sem);
6632 + if (likely(call_dl_resolve))
6633 + goto emulate;
6634 +
6635 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636 +
6637 + down_write(&current->mm->mmap_sem);
6638 + if (current->mm->call_dl_resolve) {
6639 + call_dl_resolve = current->mm->call_dl_resolve;
6640 + up_write(&current->mm->mmap_sem);
6641 + if (vma)
6642 + kmem_cache_free(vm_area_cachep, vma);
6643 + goto emulate;
6644 + }
6645 +
6646 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648 + up_write(&current->mm->mmap_sem);
6649 + if (vma)
6650 + kmem_cache_free(vm_area_cachep, vma);
6651 + return 1;
6652 + }
6653 +
6654 + if (pax_insert_vma(vma, call_dl_resolve)) {
6655 + up_write(&current->mm->mmap_sem);
6656 + kmem_cache_free(vm_area_cachep, vma);
6657 + return 1;
6658 + }
6659 +
6660 + current->mm->call_dl_resolve = call_dl_resolve;
6661 + up_write(&current->mm->mmap_sem);
6662 +
6663 +emulate:
6664 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665 + regs->tpc = call_dl_resolve;
6666 + regs->tnpc = addr+4;
6667 + return 3;
6668 + }
6669 +#endif
6670 +
6671 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672 + if ((save & 0xFFC00000U) == 0x05000000U &&
6673 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6674 + nop == 0x01000000U)
6675 + {
6676 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677 + regs->u_regs[UREG_G2] = addr + 4;
6678 + addr = (save & 0x003FFFFFU) << 10;
6679 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6680 +
6681 + if (test_thread_flag(TIF_32BIT))
6682 + addr &= 0xFFFFFFFFUL;
6683 +
6684 + regs->tpc = addr;
6685 + regs->tnpc = addr+4;
6686 + return 3;
6687 + }
6688 +
6689 + /* PaX: 64-bit PLT stub */
6690 + err = get_user(sethi1, (unsigned int *)addr);
6691 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6692 + err |= get_user(or1, (unsigned int *)(addr+8));
6693 + err |= get_user(or2, (unsigned int *)(addr+12));
6694 + err |= get_user(sllx, (unsigned int *)(addr+16));
6695 + err |= get_user(add, (unsigned int *)(addr+20));
6696 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6697 + err |= get_user(nop, (unsigned int *)(addr+28));
6698 + if (err)
6699 + break;
6700 +
6701 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6702 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6703 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6704 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6705 + sllx == 0x89293020U &&
6706 + add == 0x8A010005U &&
6707 + jmpl == 0x89C14000U &&
6708 + nop == 0x01000000U)
6709 + {
6710 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6712 + regs->u_regs[UREG_G4] <<= 32;
6713 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6714 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6715 + regs->u_regs[UREG_G4] = addr + 24;
6716 + addr = regs->u_regs[UREG_G5];
6717 + regs->tpc = addr;
6718 + regs->tnpc = addr+4;
6719 + return 3;
6720 + }
6721 + }
6722 + } while (0);
6723 +
6724 +#ifdef CONFIG_PAX_DLRESOLVE
6725 + do { /* PaX: unpatched PLT emulation step 2 */
6726 + unsigned int save, call, nop;
6727 +
6728 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6729 + err |= get_user(call, (unsigned int *)regs->tpc);
6730 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6731 + if (err)
6732 + break;
6733 +
6734 + if (save == 0x9DE3BFA8U &&
6735 + (call & 0xC0000000U) == 0x40000000U &&
6736 + nop == 0x01000000U)
6737 + {
6738 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6739 +
6740 + if (test_thread_flag(TIF_32BIT))
6741 + dl_resolve &= 0xFFFFFFFFUL;
6742 +
6743 + regs->u_regs[UREG_RETPC] = regs->tpc;
6744 + regs->tpc = dl_resolve;
6745 + regs->tnpc = dl_resolve+4;
6746 + return 3;
6747 + }
6748 + } while (0);
6749 +#endif
6750 +
6751 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6752 + unsigned int sethi, ba, nop;
6753 +
6754 + err = get_user(sethi, (unsigned int *)regs->tpc);
6755 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6756 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6757 +
6758 + if (err)
6759 + break;
6760 +
6761 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6762 + (ba & 0xFFF00000U) == 0x30600000U &&
6763 + nop == 0x01000000U)
6764 + {
6765 + unsigned long addr;
6766 +
6767 + addr = (sethi & 0x003FFFFFU) << 10;
6768 + regs->u_regs[UREG_G1] = addr;
6769 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6770 +
6771 + if (test_thread_flag(TIF_32BIT))
6772 + addr &= 0xFFFFFFFFUL;
6773 +
6774 + regs->tpc = addr;
6775 + regs->tnpc = addr+4;
6776 + return 2;
6777 + }
6778 + } while (0);
6779 +
6780 +#endif
6781 +
6782 + return 1;
6783 +}
6784 +
6785 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6786 +{
6787 + unsigned long i;
6788 +
6789 + printk(KERN_ERR "PAX: bytes at PC: ");
6790 + for (i = 0; i < 8; i++) {
6791 + unsigned int c;
6792 + if (get_user(c, (unsigned int *)pc+i))
6793 + printk(KERN_CONT "???????? ");
6794 + else
6795 + printk(KERN_CONT "%08x ", c);
6796 + }
6797 + printk("\n");
6798 +}
6799 +#endif
6800 +
6801 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6802 {
6803 struct mm_struct *mm = current->mm;
6804 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6805 if (!vma)
6806 goto bad_area;
6807
6808 +#ifdef CONFIG_PAX_PAGEEXEC
6809 + /* PaX: detect ITLB misses on non-exec pages */
6810 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6811 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6812 + {
6813 + if (address != regs->tpc)
6814 + goto good_area;
6815 +
6816 + up_read(&mm->mmap_sem);
6817 + switch (pax_handle_fetch_fault(regs)) {
6818 +
6819 +#ifdef CONFIG_PAX_EMUPLT
6820 + case 2:
6821 + case 3:
6822 + return;
6823 +#endif
6824 +
6825 + }
6826 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6827 + do_group_exit(SIGKILL);
6828 + }
6829 +#endif
6830 +
6831 /* Pure DTLB misses do not tell us whether the fault causing
6832 * load/store/atomic was a write or not, it only says that there
6833 * was no match. So in such a case we (carefully) read the
6834 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6835 index f27d103..1b06377 100644
6836 --- a/arch/sparc/mm/hugetlbpage.c
6837 +++ b/arch/sparc/mm/hugetlbpage.c
6838 @@ -69,7 +69,7 @@ full_search:
6839 }
6840 return -ENOMEM;
6841 }
6842 - if (likely(!vma || addr + len <= vma->vm_start)) {
6843 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6844 /*
6845 * Remember the place where we stopped the search:
6846 */
6847 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6848 /* make sure it can fit in the remaining address space */
6849 if (likely(addr > len)) {
6850 vma = find_vma(mm, addr-len);
6851 - if (!vma || addr <= vma->vm_start) {
6852 + if (check_heap_stack_gap(vma, addr - len, len)) {
6853 /* remember the address as a hint for next time */
6854 return (mm->free_area_cache = addr-len);
6855 }
6856 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6857 if (unlikely(mm->mmap_base < len))
6858 goto bottomup;
6859
6860 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6861 + addr = mm->mmap_base - len;
6862
6863 do {
6864 + addr &= HPAGE_MASK;
6865 /*
6866 * Lookup failure means no vma is above this address,
6867 * else if new region fits below vma->vm_start,
6868 * return with success:
6869 */
6870 vma = find_vma(mm, addr);
6871 - if (likely(!vma || addr+len <= vma->vm_start)) {
6872 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6873 /* remember the address as a hint for next time */
6874 return (mm->free_area_cache = addr);
6875 }
6876 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6877 mm->cached_hole_size = vma->vm_start - addr;
6878
6879 /* try just below the current vma->vm_start */
6880 - addr = (vma->vm_start-len) & HPAGE_MASK;
6881 - } while (likely(len < vma->vm_start));
6882 + addr = skip_heap_stack_gap(vma, len);
6883 + } while (!IS_ERR_VALUE(addr));
6884
6885 bottomup:
6886 /*
6887 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6888 if (addr) {
6889 addr = ALIGN(addr, HPAGE_SIZE);
6890 vma = find_vma(mm, addr);
6891 - if (task_size - len >= addr &&
6892 - (!vma || addr + len <= vma->vm_start))
6893 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6894 return addr;
6895 }
6896 if (mm->get_unmapped_area == arch_get_unmapped_area)
6897 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6898 index dc7c3b1..34c0070 100644
6899 --- a/arch/sparc/mm/init_32.c
6900 +++ b/arch/sparc/mm/init_32.c
6901 @@ -317,6 +317,9 @@ extern void device_scan(void);
6902 pgprot_t PAGE_SHARED __read_mostly;
6903 EXPORT_SYMBOL(PAGE_SHARED);
6904
6905 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6906 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6907 +
6908 void __init paging_init(void)
6909 {
6910 switch(sparc_cpu_model) {
6911 @@ -345,17 +348,17 @@ void __init paging_init(void)
6912
6913 /* Initialize the protection map with non-constant, MMU dependent values. */
6914 protection_map[0] = PAGE_NONE;
6915 - protection_map[1] = PAGE_READONLY;
6916 - protection_map[2] = PAGE_COPY;
6917 - protection_map[3] = PAGE_COPY;
6918 + protection_map[1] = PAGE_READONLY_NOEXEC;
6919 + protection_map[2] = PAGE_COPY_NOEXEC;
6920 + protection_map[3] = PAGE_COPY_NOEXEC;
6921 protection_map[4] = PAGE_READONLY;
6922 protection_map[5] = PAGE_READONLY;
6923 protection_map[6] = PAGE_COPY;
6924 protection_map[7] = PAGE_COPY;
6925 protection_map[8] = PAGE_NONE;
6926 - protection_map[9] = PAGE_READONLY;
6927 - protection_map[10] = PAGE_SHARED;
6928 - protection_map[11] = PAGE_SHARED;
6929 + protection_map[9] = PAGE_READONLY_NOEXEC;
6930 + protection_map[10] = PAGE_SHARED_NOEXEC;
6931 + protection_map[11] = PAGE_SHARED_NOEXEC;
6932 protection_map[12] = PAGE_READONLY;
6933 protection_map[13] = PAGE_READONLY;
6934 protection_map[14] = PAGE_SHARED;
6935 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6936 index 509b1ff..bfd7118 100644
6937 --- a/arch/sparc/mm/srmmu.c
6938 +++ b/arch/sparc/mm/srmmu.c
6939 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6940 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6941 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6942 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6943 +
6944 +#ifdef CONFIG_PAX_PAGEEXEC
6945 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6946 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6947 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6948 +#endif
6949 +
6950 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6951 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6952
6953 diff --git a/arch/um/Makefile b/arch/um/Makefile
6954 index fc633db..5e1a1c2 100644
6955 --- a/arch/um/Makefile
6956 +++ b/arch/um/Makefile
6957 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6958 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6959 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6960
6961 +ifdef CONSTIFY_PLUGIN
6962 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6963 +endif
6964 +
6965 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6966
6967 #This will adjust *FLAGS accordingly to the platform.
6968 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6969 index 6c03acd..a5e0215 100644
6970 --- a/arch/um/include/asm/kmap_types.h
6971 +++ b/arch/um/include/asm/kmap_types.h
6972 @@ -23,6 +23,7 @@ enum km_type {
6973 KM_IRQ1,
6974 KM_SOFTIRQ0,
6975 KM_SOFTIRQ1,
6976 + KM_CLEARPAGE,
6977 KM_TYPE_NR
6978 };
6979
6980 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6981 index 4cc9b6c..02e5029 100644
6982 --- a/arch/um/include/asm/page.h
6983 +++ b/arch/um/include/asm/page.h
6984 @@ -14,6 +14,9 @@
6985 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6986 #define PAGE_MASK (~(PAGE_SIZE-1))
6987
6988 +#define ktla_ktva(addr) (addr)
6989 +#define ktva_ktla(addr) (addr)
6990 +
6991 #ifndef __ASSEMBLY__
6992
6993 struct page;
6994 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6995 index 4a28a15..654dc2a 100644
6996 --- a/arch/um/kernel/process.c
6997 +++ b/arch/um/kernel/process.c
6998 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6999 return 2;
7000 }
7001
7002 -/*
7003 - * Only x86 and x86_64 have an arch_align_stack().
7004 - * All other arches have "#define arch_align_stack(x) (x)"
7005 - * in their asm/system.h
7006 - * As this is included in UML from asm-um/system-generic.h,
7007 - * we can use it to behave as the subarch does.
7008 - */
7009 -#ifndef arch_align_stack
7010 -unsigned long arch_align_stack(unsigned long sp)
7011 -{
7012 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7013 - sp -= get_random_int() % 8192;
7014 - return sp & ~0xf;
7015 -}
7016 -#endif
7017 -
7018 unsigned long get_wchan(struct task_struct *p)
7019 {
7020 unsigned long stack_page, sp, ip;
7021 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7022 index d1b93c4..ae1b7fd 100644
7023 --- a/arch/um/sys-i386/shared/sysdep/system.h
7024 +++ b/arch/um/sys-i386/shared/sysdep/system.h
7025 @@ -17,7 +17,7 @@
7026 # define AT_VECTOR_SIZE_ARCH 1
7027 #endif
7028
7029 -extern unsigned long arch_align_stack(unsigned long sp);
7030 +#define arch_align_stack(x) ((x) & ~0xfUL)
7031
7032 void default_idle(void);
7033
7034 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7035 index 857ca0b..9a2669d 100644
7036 --- a/arch/um/sys-i386/syscalls.c
7037 +++ b/arch/um/sys-i386/syscalls.c
7038 @@ -11,6 +11,21 @@
7039 #include "asm/uaccess.h"
7040 #include "asm/unistd.h"
7041
7042 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7043 +{
7044 + unsigned long pax_task_size = TASK_SIZE;
7045 +
7046 +#ifdef CONFIG_PAX_SEGMEXEC
7047 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7048 + pax_task_size = SEGMEXEC_TASK_SIZE;
7049 +#endif
7050 +
7051 + if (len > pax_task_size || addr > pax_task_size - len)
7052 + return -EINVAL;
7053 +
7054 + return 0;
7055 +}
7056 +
7057 /*
7058 * Perform the select(nd, in, out, ex, tv) and mmap() system
7059 * calls. Linux/i386 didn't use to be able to handle more than
7060 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7061 index d1b93c4..ae1b7fd 100644
7062 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
7063 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7064 @@ -17,7 +17,7 @@
7065 # define AT_VECTOR_SIZE_ARCH 1
7066 #endif
7067
7068 -extern unsigned long arch_align_stack(unsigned long sp);
7069 +#define arch_align_stack(x) ((x) & ~0xfUL)
7070
7071 void default_idle(void);
7072
7073 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7074 index 73ae02a..f932de5 100644
7075 --- a/arch/x86/Kconfig
7076 +++ b/arch/x86/Kconfig
7077 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7078
7079 config X86_32_LAZY_GS
7080 def_bool y
7081 - depends on X86_32 && !CC_STACKPROTECTOR
7082 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7083
7084 config KTIME_SCALAR
7085 def_bool X86_32
7086 @@ -1008,7 +1008,7 @@ choice
7087
7088 config NOHIGHMEM
7089 bool "off"
7090 - depends on !X86_NUMAQ
7091 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7092 ---help---
7093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7094 However, the address space of 32-bit x86 processors is only 4
7095 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
7096
7097 config HIGHMEM4G
7098 bool "4GB"
7099 - depends on !X86_NUMAQ
7100 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7101 ---help---
7102 Select this if you have a 32-bit processor and between 1 and 4
7103 gigabytes of physical RAM.
7104 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7105 hex
7106 default 0xB0000000 if VMSPLIT_3G_OPT
7107 default 0x80000000 if VMSPLIT_2G
7108 - default 0x78000000 if VMSPLIT_2G_OPT
7109 + default 0x70000000 if VMSPLIT_2G_OPT
7110 default 0x40000000 if VMSPLIT_1G
7111 default 0xC0000000
7112 depends on X86_32
7113 @@ -1460,6 +1460,7 @@ config SECCOMP
7114
7115 config CC_STACKPROTECTOR
7116 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7117 + depends on X86_64 || !PAX_MEMORY_UDEREF
7118 ---help---
7119 This option turns on the -fstack-protector GCC feature. This
7120 feature puts, at the beginning of functions, a canary value on
7121 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7122 config PHYSICAL_START
7123 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7124 default "0x1000000"
7125 + range 0x400000 0x40000000
7126 ---help---
7127 This gives the physical address where the kernel is loaded.
7128
7129 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7130 hex
7131 prompt "Alignment value to which kernel should be aligned" if X86_32
7132 default "0x1000000"
7133 + range 0x400000 0x1000000 if PAX_KERNEXEC
7134 range 0x2000 0x1000000
7135 ---help---
7136 This value puts the alignment restrictions on physical address
7137 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7138 Say N if you want to disable CPU hotplug.
7139
7140 config COMPAT_VDSO
7141 - def_bool y
7142 + def_bool n
7143 prompt "Compat VDSO support"
7144 depends on X86_32 || IA32_EMULATION
7145 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7146 ---help---
7147 Map the 32-bit VDSO to the predictable old-style address too.
7148 ---help---
7149 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7150 index 0e566103..1a6b57e 100644
7151 --- a/arch/x86/Kconfig.cpu
7152 +++ b/arch/x86/Kconfig.cpu
7153 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7154
7155 config X86_F00F_BUG
7156 def_bool y
7157 - depends on M586MMX || M586TSC || M586 || M486 || M386
7158 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7159
7160 config X86_WP_WORKS_OK
7161 def_bool y
7162 @@ -360,7 +360,7 @@ config X86_POPAD_OK
7163
7164 config X86_ALIGNMENT_16
7165 def_bool y
7166 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7167 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7168
7169 config X86_INTEL_USERCOPY
7170 def_bool y
7171 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
7172 # generates cmov.
7173 config X86_CMOV
7174 def_bool y
7175 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7176 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7177
7178 config X86_MINIMUM_CPU_FAMILY
7179 int
7180 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7181 index d105f29..c928727 100644
7182 --- a/arch/x86/Kconfig.debug
7183 +++ b/arch/x86/Kconfig.debug
7184 @@ -99,7 +99,7 @@ config X86_PTDUMP
7185 config DEBUG_RODATA
7186 bool "Write protect kernel read-only data structures"
7187 default y
7188 - depends on DEBUG_KERNEL
7189 + depends on DEBUG_KERNEL && BROKEN
7190 ---help---
7191 Mark the kernel read-only data as write-protected in the pagetables,
7192 in order to catch accidental (and incorrect) writes to such const
7193 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7194 index d2d24c9..0f21f8d 100644
7195 --- a/arch/x86/Makefile
7196 +++ b/arch/x86/Makefile
7197 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7198 else
7199 BITS := 64
7200 UTS_MACHINE := x86_64
7201 + biarch := $(call cc-option,-m64)
7202 CHECKFLAGS += -D__x86_64__ -m64
7203
7204 KBUILD_AFLAGS += -m64
7205 @@ -189,3 +190,12 @@ define archhelp
7206 echo ' FDARGS="..." arguments for the booted kernel'
7207 echo ' FDINITRD=file initrd for the booted kernel'
7208 endef
7209 +
7210 +define OLD_LD
7211 +
7212 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7213 +*** Please upgrade your binutils to 2.18 or newer
7214 +endef
7215 +
7216 +archprepare:
7217 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7218 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7219 index ec749c2..bbb5319 100644
7220 --- a/arch/x86/boot/Makefile
7221 +++ b/arch/x86/boot/Makefile
7222 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7223 $(call cc-option, -fno-stack-protector) \
7224 $(call cc-option, -mpreferred-stack-boundary=2)
7225 KBUILD_CFLAGS += $(call cc-option, -m32)
7226 +ifdef CONSTIFY_PLUGIN
7227 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7228 +endif
7229 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7230 GCOV_PROFILE := n
7231
7232 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7233 index 878e4b9..20537ab 100644
7234 --- a/arch/x86/boot/bitops.h
7235 +++ b/arch/x86/boot/bitops.h
7236 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7237 u8 v;
7238 const u32 *p = (const u32 *)addr;
7239
7240 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7241 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7242 return v;
7243 }
7244
7245 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7246
7247 static inline void set_bit(int nr, void *addr)
7248 {
7249 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7250 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7251 }
7252
7253 #endif /* BOOT_BITOPS_H */
7254 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7255 index 98239d2..f40214c 100644
7256 --- a/arch/x86/boot/boot.h
7257 +++ b/arch/x86/boot/boot.h
7258 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7259 static inline u16 ds(void)
7260 {
7261 u16 seg;
7262 - asm("movw %%ds,%0" : "=rm" (seg));
7263 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7264 return seg;
7265 }
7266
7267 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7268 static inline int memcmp(const void *s1, const void *s2, size_t len)
7269 {
7270 u8 diff;
7271 - asm("repe; cmpsb; setnz %0"
7272 + asm volatile("repe; cmpsb; setnz %0"
7273 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7274 return diff;
7275 }
7276 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7277 index f8ed065..5bf5ff3 100644
7278 --- a/arch/x86/boot/compressed/Makefile
7279 +++ b/arch/x86/boot/compressed/Makefile
7280 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7281 KBUILD_CFLAGS += $(cflags-y)
7282 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7283 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7284 +ifdef CONSTIFY_PLUGIN
7285 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7286 +endif
7287
7288 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7289 GCOV_PROFILE := n
7290 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7291 index f543b70..b60fba8 100644
7292 --- a/arch/x86/boot/compressed/head_32.S
7293 +++ b/arch/x86/boot/compressed/head_32.S
7294 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7295 notl %eax
7296 andl %eax, %ebx
7297 #else
7298 - movl $LOAD_PHYSICAL_ADDR, %ebx
7299 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7300 #endif
7301
7302 /* Target address to relocate to for decompression */
7303 @@ -149,7 +149,7 @@ relocated:
7304 * and where it was actually loaded.
7305 */
7306 movl %ebp, %ebx
7307 - subl $LOAD_PHYSICAL_ADDR, %ebx
7308 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7309 jz 2f /* Nothing to be done if loaded at compiled addr. */
7310 /*
7311 * Process relocations.
7312 @@ -157,8 +157,7 @@ relocated:
7313
7314 1: subl $4, %edi
7315 movl (%edi), %ecx
7316 - testl %ecx, %ecx
7317 - jz 2f
7318 + jecxz 2f
7319 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7320 jmp 1b
7321 2:
7322 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7323 index 077e1b6..2c6b13b 100644
7324 --- a/arch/x86/boot/compressed/head_64.S
7325 +++ b/arch/x86/boot/compressed/head_64.S
7326 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7327 notl %eax
7328 andl %eax, %ebx
7329 #else
7330 - movl $LOAD_PHYSICAL_ADDR, %ebx
7331 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7332 #endif
7333
7334 /* Target address to relocate to for decompression */
7335 @@ -183,7 +183,7 @@ no_longmode:
7336 hlt
7337 jmp 1b
7338
7339 -#include "../../kernel/verify_cpu_64.S"
7340 +#include "../../kernel/verify_cpu.S"
7341
7342 /*
7343 * Be careful here startup_64 needs to be at a predictable
7344 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7345 notq %rax
7346 andq %rax, %rbp
7347 #else
7348 - movq $LOAD_PHYSICAL_ADDR, %rbp
7349 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7350 #endif
7351
7352 /* Target address to relocate to for decompression */
7353 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7354 index 842b2a3..f00178b 100644
7355 --- a/arch/x86/boot/compressed/misc.c
7356 +++ b/arch/x86/boot/compressed/misc.c
7357 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7358 case PT_LOAD:
7359 #ifdef CONFIG_RELOCATABLE
7360 dest = output;
7361 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7362 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7363 #else
7364 dest = (void *)(phdr->p_paddr);
7365 #endif
7366 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7367 error("Destination address too large");
7368 #endif
7369 #ifndef CONFIG_RELOCATABLE
7370 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7371 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7372 error("Wrong destination address");
7373 #endif
7374
7375 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7376 index bcbd36c..b1754af 100644
7377 --- a/arch/x86/boot/compressed/mkpiggy.c
7378 +++ b/arch/x86/boot/compressed/mkpiggy.c
7379 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7380
7381 offs = (olen > ilen) ? olen - ilen : 0;
7382 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7383 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7384 + offs += 64*1024; /* Add 64K bytes slack */
7385 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7386
7387 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7388 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7389 index bbeb0c3..f5167ab 100644
7390 --- a/arch/x86/boot/compressed/relocs.c
7391 +++ b/arch/x86/boot/compressed/relocs.c
7392 @@ -10,8 +10,11 @@
7393 #define USE_BSD
7394 #include <endian.h>
7395
7396 +#include "../../../../include/linux/autoconf.h"
7397 +
7398 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7399 static Elf32_Ehdr ehdr;
7400 +static Elf32_Phdr *phdr;
7401 static unsigned long reloc_count, reloc_idx;
7402 static unsigned long *relocs;
7403
7404 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7405
7406 static int is_safe_abs_reloc(const char* sym_name)
7407 {
7408 - int i;
7409 + unsigned int i;
7410
7411 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7412 if (!strcmp(sym_name, safe_abs_relocs[i]))
7413 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7414 }
7415 }
7416
7417 +static void read_phdrs(FILE *fp)
7418 +{
7419 + unsigned int i;
7420 +
7421 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7422 + if (!phdr) {
7423 + die("Unable to allocate %d program headers\n",
7424 + ehdr.e_phnum);
7425 + }
7426 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7427 + die("Seek to %d failed: %s\n",
7428 + ehdr.e_phoff, strerror(errno));
7429 + }
7430 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7431 + die("Cannot read ELF program headers: %s\n",
7432 + strerror(errno));
7433 + }
7434 + for(i = 0; i < ehdr.e_phnum; i++) {
7435 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7436 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7437 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7438 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7439 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7440 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7441 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7442 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7443 + }
7444 +
7445 +}
7446 +
7447 static void read_shdrs(FILE *fp)
7448 {
7449 - int i;
7450 + unsigned int i;
7451 Elf32_Shdr shdr;
7452
7453 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7454 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7455
7456 static void read_strtabs(FILE *fp)
7457 {
7458 - int i;
7459 + unsigned int i;
7460 for (i = 0; i < ehdr.e_shnum; i++) {
7461 struct section *sec = &secs[i];
7462 if (sec->shdr.sh_type != SHT_STRTAB) {
7463 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7464
7465 static void read_symtabs(FILE *fp)
7466 {
7467 - int i,j;
7468 + unsigned int i,j;
7469 for (i = 0; i < ehdr.e_shnum; i++) {
7470 struct section *sec = &secs[i];
7471 if (sec->shdr.sh_type != SHT_SYMTAB) {
7472 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7473
7474 static void read_relocs(FILE *fp)
7475 {
7476 - int i,j;
7477 + unsigned int i,j;
7478 + uint32_t base;
7479 +
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 struct section *sec = &secs[i];
7482 if (sec->shdr.sh_type != SHT_REL) {
7483 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7484 die("Cannot read symbol table: %s\n",
7485 strerror(errno));
7486 }
7487 + base = 0;
7488 + for (j = 0; j < ehdr.e_phnum; j++) {
7489 + if (phdr[j].p_type != PT_LOAD )
7490 + continue;
7491 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7492 + continue;
7493 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7494 + break;
7495 + }
7496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7497 Elf32_Rel *rel = &sec->reltab[j];
7498 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7499 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7500 rel->r_info = elf32_to_cpu(rel->r_info);
7501 }
7502 }
7503 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7504
7505 static void print_absolute_symbols(void)
7506 {
7507 - int i;
7508 + unsigned int i;
7509 printf("Absolute symbols\n");
7510 printf(" Num: Value Size Type Bind Visibility Name\n");
7511 for (i = 0; i < ehdr.e_shnum; i++) {
7512 struct section *sec = &secs[i];
7513 char *sym_strtab;
7514 Elf32_Sym *sh_symtab;
7515 - int j;
7516 + unsigned int j;
7517
7518 if (sec->shdr.sh_type != SHT_SYMTAB) {
7519 continue;
7520 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7521
7522 static void print_absolute_relocs(void)
7523 {
7524 - int i, printed = 0;
7525 + unsigned int i, printed = 0;
7526
7527 for (i = 0; i < ehdr.e_shnum; i++) {
7528 struct section *sec = &secs[i];
7529 struct section *sec_applies, *sec_symtab;
7530 char *sym_strtab;
7531 Elf32_Sym *sh_symtab;
7532 - int j;
7533 + unsigned int j;
7534 if (sec->shdr.sh_type != SHT_REL) {
7535 continue;
7536 }
7537 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7538
7539 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7540 {
7541 - int i;
7542 + unsigned int i;
7543 /* Walk through the relocations */
7544 for (i = 0; i < ehdr.e_shnum; i++) {
7545 char *sym_strtab;
7546 Elf32_Sym *sh_symtab;
7547 struct section *sec_applies, *sec_symtab;
7548 - int j;
7549 + unsigned int j;
7550 struct section *sec = &secs[i];
7551
7552 if (sec->shdr.sh_type != SHT_REL) {
7553 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7554 if (sym->st_shndx == SHN_ABS) {
7555 continue;
7556 }
7557 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7558 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7559 + continue;
7560 +
7561 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7562 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7563 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7564 + continue;
7565 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7566 + continue;
7567 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7568 + continue;
7569 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7570 + continue;
7571 +#endif
7572 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7573 /*
7574 * NONE can be ignored and and PC relative
7575 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7576
7577 static void emit_relocs(int as_text)
7578 {
7579 - int i;
7580 + unsigned int i;
7581 /* Count how many relocations I have and allocate space for them. */
7582 reloc_count = 0;
7583 walk_relocs(count_reloc);
7584 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7585 fname, strerror(errno));
7586 }
7587 read_ehdr(fp);
7588 + read_phdrs(fp);
7589 read_shdrs(fp);
7590 read_strtabs(fp);
7591 read_symtabs(fp);
7592 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7593 index 4d3ff03..e4972ff 100644
7594 --- a/arch/x86/boot/cpucheck.c
7595 +++ b/arch/x86/boot/cpucheck.c
7596 @@ -74,7 +74,7 @@ static int has_fpu(void)
7597 u16 fcw = -1, fsw = -1;
7598 u32 cr0;
7599
7600 - asm("movl %%cr0,%0" : "=r" (cr0));
7601 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7602 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7603 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7604 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7605 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7606 {
7607 u32 f0, f1;
7608
7609 - asm("pushfl ; "
7610 + asm volatile("pushfl ; "
7611 "pushfl ; "
7612 "popl %0 ; "
7613 "movl %0,%1 ; "
7614 @@ -115,7 +115,7 @@ static void get_flags(void)
7615 set_bit(X86_FEATURE_FPU, cpu.flags);
7616
7617 if (has_eflag(X86_EFLAGS_ID)) {
7618 - asm("cpuid"
7619 + asm volatile("cpuid"
7620 : "=a" (max_intel_level),
7621 "=b" (cpu_vendor[0]),
7622 "=d" (cpu_vendor[1]),
7623 @@ -124,7 +124,7 @@ static void get_flags(void)
7624
7625 if (max_intel_level >= 0x00000001 &&
7626 max_intel_level <= 0x0000ffff) {
7627 - asm("cpuid"
7628 + asm volatile("cpuid"
7629 : "=a" (tfms),
7630 "=c" (cpu.flags[4]),
7631 "=d" (cpu.flags[0])
7632 @@ -136,7 +136,7 @@ static void get_flags(void)
7633 cpu.model += ((tfms >> 16) & 0xf) << 4;
7634 }
7635
7636 - asm("cpuid"
7637 + asm volatile("cpuid"
7638 : "=a" (max_amd_level)
7639 : "a" (0x80000000)
7640 : "ebx", "ecx", "edx");
7641 @@ -144,7 +144,7 @@ static void get_flags(void)
7642 if (max_amd_level >= 0x80000001 &&
7643 max_amd_level <= 0x8000ffff) {
7644 u32 eax = 0x80000001;
7645 - asm("cpuid"
7646 + asm volatile("cpuid"
7647 : "+a" (eax),
7648 "=c" (cpu.flags[6]),
7649 "=d" (cpu.flags[1])
7650 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7651 u32 ecx = MSR_K7_HWCR;
7652 u32 eax, edx;
7653
7654 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7655 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7656 eax &= ~(1 << 15);
7657 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7658 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7659
7660 get_flags(); /* Make sure it really did something */
7661 err = check_flags();
7662 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7663 u32 ecx = MSR_VIA_FCR;
7664 u32 eax, edx;
7665
7666 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7667 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7668 eax |= (1<<1)|(1<<7);
7669 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7670 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7671
7672 set_bit(X86_FEATURE_CX8, cpu.flags);
7673 err = check_flags();
7674 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7675 u32 eax, edx;
7676 u32 level = 1;
7677
7678 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7679 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7680 - asm("cpuid"
7681 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7682 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7683 + asm volatile("cpuid"
7684 : "+a" (level), "=d" (cpu.flags[0])
7685 : : "ecx", "ebx");
7686 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7687 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7688
7689 err = check_flags();
7690 }
7691 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7692 index b31cc54..8d69237 100644
7693 --- a/arch/x86/boot/header.S
7694 +++ b/arch/x86/boot/header.S
7695 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7696 # single linked list of
7697 # struct setup_data
7698
7699 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7700 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7701
7702 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7703 #define VO_INIT_SIZE (VO__end - VO__text)
7704 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7705 index cae3feb..ff8ff2a 100644
7706 --- a/arch/x86/boot/memory.c
7707 +++ b/arch/x86/boot/memory.c
7708 @@ -19,7 +19,7 @@
7709
7710 static int detect_memory_e820(void)
7711 {
7712 - int count = 0;
7713 + unsigned int count = 0;
7714 struct biosregs ireg, oreg;
7715 struct e820entry *desc = boot_params.e820_map;
7716 static struct e820entry buf; /* static so it is zeroed */
7717 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7718 index 11e8c6e..fdbb1ed 100644
7719 --- a/arch/x86/boot/video-vesa.c
7720 +++ b/arch/x86/boot/video-vesa.c
7721 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7722
7723 boot_params.screen_info.vesapm_seg = oreg.es;
7724 boot_params.screen_info.vesapm_off = oreg.di;
7725 + boot_params.screen_info.vesapm_size = oreg.cx;
7726 }
7727
7728 /*
7729 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7730 index d42da38..787cdf3 100644
7731 --- a/arch/x86/boot/video.c
7732 +++ b/arch/x86/boot/video.c
7733 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7734 static unsigned int get_entry(void)
7735 {
7736 char entry_buf[4];
7737 - int i, len = 0;
7738 + unsigned int i, len = 0;
7739 int key;
7740 unsigned int v;
7741
7742 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7743 index 5b577d5..3c1fed4 100644
7744 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7745 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7746 @@ -8,6 +8,8 @@
7747 * including this sentence is retained in full.
7748 */
7749
7750 +#include <asm/alternative-asm.h>
7751 +
7752 .extern crypto_ft_tab
7753 .extern crypto_it_tab
7754 .extern crypto_fl_tab
7755 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7756 je B192; \
7757 leaq 32(r9),r9;
7758
7759 +#define ret pax_force_retaddr 0, 1; ret
7760 +
7761 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7762 movq r1,r2; \
7763 movq r3,r4; \
7764 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7765 index eb0566e..e3ebad8 100644
7766 --- a/arch/x86/crypto/aesni-intel_asm.S
7767 +++ b/arch/x86/crypto/aesni-intel_asm.S
7768 @@ -16,6 +16,7 @@
7769 */
7770
7771 #include <linux/linkage.h>
7772 +#include <asm/alternative-asm.h>
7773
7774 .text
7775
7776 @@ -52,6 +53,7 @@ _key_expansion_256a:
7777 pxor %xmm1, %xmm0
7778 movaps %xmm0, (%rcx)
7779 add $0x10, %rcx
7780 + pax_force_retaddr_bts
7781 ret
7782
7783 _key_expansion_192a:
7784 @@ -75,6 +77,7 @@ _key_expansion_192a:
7785 shufps $0b01001110, %xmm2, %xmm1
7786 movaps %xmm1, 16(%rcx)
7787 add $0x20, %rcx
7788 + pax_force_retaddr_bts
7789 ret
7790
7791 _key_expansion_192b:
7792 @@ -93,6 +96,7 @@ _key_expansion_192b:
7793
7794 movaps %xmm0, (%rcx)
7795 add $0x10, %rcx
7796 + pax_force_retaddr_bts
7797 ret
7798
7799 _key_expansion_256b:
7800 @@ -104,6 +108,7 @@ _key_expansion_256b:
7801 pxor %xmm1, %xmm2
7802 movaps %xmm2, (%rcx)
7803 add $0x10, %rcx
7804 + pax_force_retaddr_bts
7805 ret
7806
7807 /*
7808 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7809 cmp %rcx, %rdi
7810 jb .Ldec_key_loop
7811 xor %rax, %rax
7812 + pax_force_retaddr 0, 1
7813 ret
7814 +ENDPROC(aesni_set_key)
7815
7816 /*
7817 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7818 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7819 movups (INP), STATE # input
7820 call _aesni_enc1
7821 movups STATE, (OUTP) # output
7822 + pax_force_retaddr 0, 1
7823 ret
7824 +ENDPROC(aesni_enc)
7825
7826 /*
7827 * _aesni_enc1: internal ABI
7828 @@ -319,6 +328,7 @@ _aesni_enc1:
7829 movaps 0x70(TKEYP), KEY
7830 # aesenclast KEY, STATE # last round
7831 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7832 + pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836 @@ -482,6 +492,7 @@ _aesni_enc4:
7837 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7838 # aesenclast KEY, STATE4
7839 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7840 + pax_force_retaddr_bts
7841 ret
7842
7843 /*
7844 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7845 movups (INP), STATE # input
7846 call _aesni_dec1
7847 movups STATE, (OUTP) #output
7848 + pax_force_retaddr 0, 1
7849 ret
7850 +ENDPROC(aesni_dec)
7851
7852 /*
7853 * _aesni_dec1: internal ABI
7854 @@ -563,6 +576,7 @@ _aesni_dec1:
7855 movaps 0x70(TKEYP), KEY
7856 # aesdeclast KEY, STATE # last round
7857 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7858 + pax_force_retaddr_bts
7859 ret
7860
7861 /*
7862 @@ -726,6 +740,7 @@ _aesni_dec4:
7863 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7864 # aesdeclast KEY, STATE4
7865 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7866 + pax_force_retaddr_bts
7867 ret
7868
7869 /*
7870 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7871 cmp $16, LEN
7872 jge .Lecb_enc_loop1
7873 .Lecb_enc_ret:
7874 + pax_force_retaddr 0, 1
7875 ret
7876 +ENDPROC(aesni_ecb_enc)
7877
7878 /*
7879 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7880 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7881 cmp $16, LEN
7882 jge .Lecb_dec_loop1
7883 .Lecb_dec_ret:
7884 + pax_force_retaddr 0, 1
7885 ret
7886 +ENDPROC(aesni_ecb_dec)
7887
7888 /*
7889 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7890 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7891 jge .Lcbc_enc_loop
7892 movups STATE, (IVP)
7893 .Lcbc_enc_ret:
7894 + pax_force_retaddr 0, 1
7895 ret
7896 +ENDPROC(aesni_cbc_enc)
7897
7898 /*
7899 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7900 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7901 .Lcbc_dec_ret:
7902 movups IV, (IVP)
7903 .Lcbc_dec_just_ret:
7904 + pax_force_retaddr 0, 1
7905 ret
7906 +ENDPROC(aesni_cbc_dec)
7907 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7908 index 6214a9b..1f4fc9a 100644
7909 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7910 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7911 @@ -1,3 +1,5 @@
7912 +#include <asm/alternative-asm.h>
7913 +
7914 # enter ECRYPT_encrypt_bytes
7915 .text
7916 .p2align 5
7917 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7918 add %r11,%rsp
7919 mov %rdi,%rax
7920 mov %rsi,%rdx
7921 + pax_force_retaddr 0, 1
7922 ret
7923 # bytesatleast65:
7924 ._bytesatleast65:
7925 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7926 add %r11,%rsp
7927 mov %rdi,%rax
7928 mov %rsi,%rdx
7929 + pax_force_retaddr
7930 ret
7931 # enter ECRYPT_ivsetup
7932 .text
7933 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7934 add %r11,%rsp
7935 mov %rdi,%rax
7936 mov %rsi,%rdx
7937 + pax_force_retaddr
7938 ret
7939 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7940 index 35974a5..5662ae2 100644
7941 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7942 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7943 @@ -21,6 +21,7 @@
7944 .text
7945
7946 #include <asm/asm-offsets.h>
7947 +#include <asm/alternative-asm.h>
7948
7949 #define a_offset 0
7950 #define b_offset 4
7951 @@ -269,6 +270,7 @@ twofish_enc_blk:
7952
7953 popq R1
7954 movq $1,%rax
7955 + pax_force_retaddr 0, 1
7956 ret
7957
7958 twofish_dec_blk:
7959 @@ -321,4 +323,5 @@ twofish_dec_blk:
7960
7961 popq R1
7962 movq $1,%rax
7963 + pax_force_retaddr 0, 1
7964 ret
7965 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7966 index 14531ab..bc68a7b 100644
7967 --- a/arch/x86/ia32/ia32_aout.c
7968 +++ b/arch/x86/ia32/ia32_aout.c
7969 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7970 unsigned long dump_start, dump_size;
7971 struct user32 dump;
7972
7973 + memset(&dump, 0, sizeof(dump));
7974 +
7975 fs = get_fs();
7976 set_fs(KERNEL_DS);
7977 has_dumped = 1;
7978 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7979 dump_size = dump.u_ssize << PAGE_SHIFT;
7980 DUMP_WRITE(dump_start, dump_size);
7981 }
7982 - /*
7983 - * Finally dump the task struct. Not be used by gdb, but
7984 - * could be useful
7985 - */
7986 - set_fs(KERNEL_DS);
7987 - DUMP_WRITE(current, sizeof(*current));
7988 end_coredump:
7989 set_fs(fs);
7990 return has_dumped;
7991 @@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
7992 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
7993 current->mm->cached_hole_size = 0;
7994
7995 + retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
7996 + if (retval < 0) {
7997 + /* Someone check-me: is this error path enough? */
7998 + send_sig(SIGKILL, current, 0);
7999 + return retval;
8000 + }
8001 +
8002 install_exec_creds(bprm);
8003 current->flags &= ~PF_FORKNOEXEC;
8004
8005 @@ -422,13 +425,6 @@ beyond_if:
8006
8007 set_brk(current->mm->start_brk, current->mm->brk);
8008
8009 - retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8010 - if (retval < 0) {
8011 - /* Someone check-me: is this error path enough? */
8012 - send_sig(SIGKILL, current, 0);
8013 - return retval;
8014 - }
8015 -
8016 current->mm->start_stack =
8017 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8018 /* start thread */
8019 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8020 index 588a7aa..a3468b0 100644
8021 --- a/arch/x86/ia32/ia32_signal.c
8022 +++ b/arch/x86/ia32/ia32_signal.c
8023 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8024 }
8025 seg = get_fs();
8026 set_fs(KERNEL_DS);
8027 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8028 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8029 set_fs(seg);
8030 if (ret >= 0 && uoss_ptr) {
8031 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8032 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8033 */
8034 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8035 size_t frame_size,
8036 - void **fpstate)
8037 + void __user **fpstate)
8038 {
8039 unsigned long sp;
8040
8041 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8042
8043 if (used_math()) {
8044 sp = sp - sig_xstate_ia32_size;
8045 - *fpstate = (struct _fpstate_ia32 *) sp;
8046 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8047 if (save_i387_xstate_ia32(*fpstate) < 0)
8048 return (void __user *) -1L;
8049 }
8050 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8051 sp -= frame_size;
8052 /* Align the stack pointer according to the i386 ABI,
8053 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8054 - sp = ((sp + 4) & -16ul) - 4;
8055 + sp = ((sp - 12) & -16ul) - 4;
8056 return (void __user *) sp;
8057 }
8058
8059 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8060 * These are actually not used anymore, but left because some
8061 * gdb versions depend on them as a marker.
8062 */
8063 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8064 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8065 } put_user_catch(err);
8066
8067 if (err)
8068 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8069 0xb8,
8070 __NR_ia32_rt_sigreturn,
8071 0x80cd,
8072 - 0,
8073 + 0
8074 };
8075
8076 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8077 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8078
8079 if (ka->sa.sa_flags & SA_RESTORER)
8080 restorer = ka->sa.sa_restorer;
8081 + else if (current->mm->context.vdso)
8082 + /* Return stub is in 32bit vsyscall page */
8083 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8084 else
8085 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8086 - rt_sigreturn);
8087 + restorer = &frame->retcode;
8088 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8089
8090 /*
8091 * Not actually used anymore, but left because some gdb
8092 * versions need it.
8093 */
8094 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8095 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8096 } put_user_catch(err);
8097
8098 if (err)
8099 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8100 index 4edd8eb..29124b4 100644
8101 --- a/arch/x86/ia32/ia32entry.S
8102 +++ b/arch/x86/ia32/ia32entry.S
8103 @@ -13,7 +13,9 @@
8104 #include <asm/thread_info.h>
8105 #include <asm/segment.h>
8106 #include <asm/irqflags.h>
8107 +#include <asm/pgtable.h>
8108 #include <linux/linkage.h>
8109 +#include <asm/alternative-asm.h>
8110
8111 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8112 #include <linux/elf-em.h>
8113 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8114 ENDPROC(native_irq_enable_sysexit)
8115 #endif
8116
8117 + .macro pax_enter_kernel_user
8118 + pax_set_fptr_mask
8119 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8120 + call pax_enter_kernel_user
8121 +#endif
8122 + .endm
8123 +
8124 + .macro pax_exit_kernel_user
8125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8126 + call pax_exit_kernel_user
8127 +#endif
8128 +#ifdef CONFIG_PAX_RANDKSTACK
8129 + pushq %rax
8130 + pushq %r11
8131 + call pax_randomize_kstack
8132 + popq %r11
8133 + popq %rax
8134 +#endif
8135 + .endm
8136 +
8137 +.macro pax_erase_kstack
8138 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8139 + call pax_erase_kstack
8140 +#endif
8141 +.endm
8142 +
8143 /*
8144 * 32bit SYSENTER instruction entry.
8145 *
8146 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8147 CFI_REGISTER rsp,rbp
8148 SWAPGS_UNSAFE_STACK
8149 movq PER_CPU_VAR(kernel_stack), %rsp
8150 - addq $(KERNEL_STACK_OFFSET),%rsp
8151 - /*
8152 - * No need to follow this irqs on/off section: the syscall
8153 - * disabled irqs, here we enable it straight after entry:
8154 - */
8155 - ENABLE_INTERRUPTS(CLBR_NONE)
8156 movl %ebp,%ebp /* zero extension */
8157 pushq $__USER32_DS
8158 CFI_ADJUST_CFA_OFFSET 8
8159 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8160 pushfq
8161 CFI_ADJUST_CFA_OFFSET 8
8162 /*CFI_REL_OFFSET rflags,0*/
8163 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8164 - CFI_REGISTER rip,r10
8165 + orl $X86_EFLAGS_IF,(%rsp)
8166 + GET_THREAD_INFO(%r11)
8167 + movl TI_sysenter_return(%r11), %r11d
8168 + CFI_REGISTER rip,r11
8169 pushq $__USER32_CS
8170 CFI_ADJUST_CFA_OFFSET 8
8171 /*CFI_REL_OFFSET cs,0*/
8172 movl %eax, %eax
8173 - pushq %r10
8174 + pushq %r11
8175 CFI_ADJUST_CFA_OFFSET 8
8176 CFI_REL_OFFSET rip,0
8177 pushq %rax
8178 CFI_ADJUST_CFA_OFFSET 8
8179 cld
8180 SAVE_ARGS 0,0,1
8181 + pax_enter_kernel_user
8182 + /*
8183 + * No need to follow this irqs on/off section: the syscall
8184 + * disabled irqs, here we enable it straight after entry:
8185 + */
8186 + ENABLE_INTERRUPTS(CLBR_NONE)
8187 /* no need to do an access_ok check here because rbp has been
8188 32bit zero extended */
8189 +
8190 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8191 + mov $PAX_USER_SHADOW_BASE,%r11
8192 + add %r11,%rbp
8193 +#endif
8194 +
8195 1: movl (%rbp),%ebp
8196 .section __ex_table,"a"
8197 .quad 1b,ia32_badarg
8198 .previous
8199 - GET_THREAD_INFO(%r10)
8200 - orl $TS_COMPAT,TI_status(%r10)
8201 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8202 + GET_THREAD_INFO(%r11)
8203 + orl $TS_COMPAT,TI_status(%r11)
8204 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8205 CFI_REMEMBER_STATE
8206 jnz sysenter_tracesys
8207 cmpq $(IA32_NR_syscalls-1),%rax
8208 @@ -166,13 +202,15 @@ sysenter_do_call:
8209 sysenter_dispatch:
8210 call *ia32_sys_call_table(,%rax,8)
8211 movq %rax,RAX-ARGOFFSET(%rsp)
8212 - GET_THREAD_INFO(%r10)
8213 + GET_THREAD_INFO(%r11)
8214 DISABLE_INTERRUPTS(CLBR_NONE)
8215 TRACE_IRQS_OFF
8216 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8217 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8218 jnz sysexit_audit
8219 sysexit_from_sys_call:
8220 - andl $~TS_COMPAT,TI_status(%r10)
8221 + pax_exit_kernel_user
8222 + pax_erase_kstack
8223 + andl $~TS_COMPAT,TI_status(%r11)
8224 /* clear IF, that popfq doesn't enable interrupts early */
8225 andl $~0x200,EFLAGS-R11(%rsp)
8226 movl RIP-R11(%rsp),%edx /* User %eip */
8227 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
8228 movl %eax,%esi /* 2nd arg: syscall number */
8229 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8230 call audit_syscall_entry
8231 +
8232 + pax_erase_kstack
8233 +
8234 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8235 cmpq $(IA32_NR_syscalls-1),%rax
8236 ja ia32_badsys
8237 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
8238 .endm
8239
8240 .macro auditsys_exit exit
8241 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8242 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8243 jnz ia32_ret_from_sys_call
8244 TRACE_IRQS_ON
8245 sti
8246 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
8247 movzbl %al,%edi /* zero-extend that into %edi */
8248 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8249 call audit_syscall_exit
8250 - GET_THREAD_INFO(%r10)
8251 + GET_THREAD_INFO(%r11)
8252 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8253 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8254 cli
8255 TRACE_IRQS_OFF
8256 - testl %edi,TI_flags(%r10)
8257 + testl %edi,TI_flags(%r11)
8258 jz \exit
8259 CLEAR_RREGS -ARGOFFSET
8260 jmp int_with_check
8261 @@ -244,7 +285,7 @@ sysexit_audit:
8262
8263 sysenter_tracesys:
8264 #ifdef CONFIG_AUDITSYSCALL
8265 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8266 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8267 jz sysenter_auditsys
8268 #endif
8269 SAVE_REST
8270 @@ -252,6 +293,9 @@ sysenter_tracesys:
8271 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8272 movq %rsp,%rdi /* &pt_regs -> arg1 */
8273 call syscall_trace_enter
8274 +
8275 + pax_erase_kstack
8276 +
8277 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8278 RESTORE_REST
8279 cmpq $(IA32_NR_syscalls-1),%rax
8280 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8281 ENTRY(ia32_cstar_target)
8282 CFI_STARTPROC32 simple
8283 CFI_SIGNAL_FRAME
8284 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8285 + CFI_DEF_CFA rsp,0
8286 CFI_REGISTER rip,rcx
8287 /*CFI_REGISTER rflags,r11*/
8288 SWAPGS_UNSAFE_STACK
8289 movl %esp,%r8d
8290 CFI_REGISTER rsp,r8
8291 movq PER_CPU_VAR(kernel_stack),%rsp
8292 + SAVE_ARGS 8*6,1,1
8293 + pax_enter_kernel_user
8294 /*
8295 * No need to follow this irqs on/off section: the syscall
8296 * disabled irqs and here we enable it straight after entry:
8297 */
8298 ENABLE_INTERRUPTS(CLBR_NONE)
8299 - SAVE_ARGS 8,1,1
8300 movl %eax,%eax /* zero extension */
8301 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8302 movq %rcx,RIP-ARGOFFSET(%rsp)
8303 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8304 /* no need to do an access_ok check here because r8 has been
8305 32bit zero extended */
8306 /* hardware stack frame is complete now */
8307 +
8308 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8309 + mov $PAX_USER_SHADOW_BASE,%r11
8310 + add %r11,%r8
8311 +#endif
8312 +
8313 1: movl (%r8),%r9d
8314 .section __ex_table,"a"
8315 .quad 1b,ia32_badarg
8316 .previous
8317 - GET_THREAD_INFO(%r10)
8318 - orl $TS_COMPAT,TI_status(%r10)
8319 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8320 + GET_THREAD_INFO(%r11)
8321 + orl $TS_COMPAT,TI_status(%r11)
8322 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8323 CFI_REMEMBER_STATE
8324 jnz cstar_tracesys
8325 cmpq $IA32_NR_syscalls-1,%rax
8326 @@ -327,13 +378,15 @@ cstar_do_call:
8327 cstar_dispatch:
8328 call *ia32_sys_call_table(,%rax,8)
8329 movq %rax,RAX-ARGOFFSET(%rsp)
8330 - GET_THREAD_INFO(%r10)
8331 + GET_THREAD_INFO(%r11)
8332 DISABLE_INTERRUPTS(CLBR_NONE)
8333 TRACE_IRQS_OFF
8334 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8335 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8336 jnz sysretl_audit
8337 sysretl_from_sys_call:
8338 - andl $~TS_COMPAT,TI_status(%r10)
8339 + pax_exit_kernel_user
8340 + pax_erase_kstack
8341 + andl $~TS_COMPAT,TI_status(%r11)
8342 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8343 movl RIP-ARGOFFSET(%rsp),%ecx
8344 CFI_REGISTER rip,rcx
8345 @@ -361,7 +414,7 @@ sysretl_audit:
8346
8347 cstar_tracesys:
8348 #ifdef CONFIG_AUDITSYSCALL
8349 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8350 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8351 jz cstar_auditsys
8352 #endif
8353 xchgl %r9d,%ebp
8354 @@ -370,6 +423,9 @@ cstar_tracesys:
8355 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8356 movq %rsp,%rdi /* &pt_regs -> arg1 */
8357 call syscall_trace_enter
8358 +
8359 + pax_erase_kstack
8360 +
8361 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8362 RESTORE_REST
8363 xchgl %ebp,%r9d
8364 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8365 CFI_REL_OFFSET rip,RIP-RIP
8366 PARAVIRT_ADJUST_EXCEPTION_FRAME
8367 SWAPGS
8368 - /*
8369 - * No need to follow this irqs on/off section: the syscall
8370 - * disabled irqs and here we enable it straight after entry:
8371 - */
8372 - ENABLE_INTERRUPTS(CLBR_NONE)
8373 movl %eax,%eax
8374 pushq %rax
8375 CFI_ADJUST_CFA_OFFSET 8
8376 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8377 /* note the registers are not zero extended to the sf.
8378 this could be a problem. */
8379 SAVE_ARGS 0,0,1
8380 - GET_THREAD_INFO(%r10)
8381 - orl $TS_COMPAT,TI_status(%r10)
8382 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8383 + pax_enter_kernel_user
8384 + /*
8385 + * No need to follow this irqs on/off section: the syscall
8386 + * disabled irqs and here we enable it straight after entry:
8387 + */
8388 + ENABLE_INTERRUPTS(CLBR_NONE)
8389 + GET_THREAD_INFO(%r11)
8390 + orl $TS_COMPAT,TI_status(%r11)
8391 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8392 jnz ia32_tracesys
8393 cmpq $(IA32_NR_syscalls-1),%rax
8394 ja ia32_badsys
8395 @@ -448,6 +505,9 @@ ia32_tracesys:
8396 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8397 movq %rsp,%rdi /* &pt_regs -> arg1 */
8398 call syscall_trace_enter
8399 +
8400 + pax_erase_kstack
8401 +
8402 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8403 RESTORE_REST
8404 cmpq $(IA32_NR_syscalls-1),%rax
8405 @@ -462,6 +522,7 @@ ia32_badsys:
8406
8407 quiet_ni_syscall:
8408 movq $-ENOSYS,%rax
8409 + pax_force_retaddr
8410 ret
8411 CFI_ENDPROC
8412
8413 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8414 index 016218c..47ccbdd 100644
8415 --- a/arch/x86/ia32/sys_ia32.c
8416 +++ b/arch/x86/ia32/sys_ia32.c
8417 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8418 */
8419 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8420 {
8421 - typeof(ubuf->st_uid) uid = 0;
8422 - typeof(ubuf->st_gid) gid = 0;
8423 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8424 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8425 SET_UID(uid, stat->uid);
8426 SET_GID(gid, stat->gid);
8427 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8428 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8429 }
8430 set_fs(KERNEL_DS);
8431 ret = sys_rt_sigprocmask(how,
8432 - set ? (sigset_t __user *)&s : NULL,
8433 - oset ? (sigset_t __user *)&s : NULL,
8434 + set ? (sigset_t __force_user *)&s : NULL,
8435 + oset ? (sigset_t __force_user *)&s : NULL,
8436 sigsetsize);
8437 set_fs(old_fs);
8438 if (ret)
8439 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8440 mm_segment_t old_fs = get_fs();
8441
8442 set_fs(KERNEL_DS);
8443 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8444 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8445 set_fs(old_fs);
8446 if (put_compat_timespec(&t, interval))
8447 return -EFAULT;
8448 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8449 mm_segment_t old_fs = get_fs();
8450
8451 set_fs(KERNEL_DS);
8452 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8453 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8454 set_fs(old_fs);
8455 if (!ret) {
8456 switch (_NSIG_WORDS) {
8457 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8458 if (copy_siginfo_from_user32(&info, uinfo))
8459 return -EFAULT;
8460 set_fs(KERNEL_DS);
8461 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8462 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8463 set_fs(old_fs);
8464 return ret;
8465 }
8466 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8467 return -EFAULT;
8468
8469 set_fs(KERNEL_DS);
8470 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8471 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8472 count);
8473 set_fs(old_fs);
8474
8475 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8476 index e2077d3..17d07ad 100644
8477 --- a/arch/x86/include/asm/alternative-asm.h
8478 +++ b/arch/x86/include/asm/alternative-asm.h
8479 @@ -8,10 +8,10 @@
8480
8481 #ifdef CONFIG_SMP
8482 .macro LOCK_PREFIX
8483 -1: lock
8484 +672: lock
8485 .section .smp_locks,"a"
8486 .align 4
8487 - X86_ALIGN 1b
8488 + X86_ALIGN 672b
8489 .previous
8490 .endm
8491 #else
8492 @@ -19,4 +19,43 @@
8493 .endm
8494 #endif
8495
8496 +#ifdef KERNEXEC_PLUGIN
8497 + .macro pax_force_retaddr_bts rip=0
8498 + btsq $63,\rip(%rsp)
8499 + .endm
8500 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8501 + .macro pax_force_retaddr rip=0, reload=0
8502 + btsq $63,\rip(%rsp)
8503 + .endm
8504 + .macro pax_force_fptr ptr
8505 + btsq $63,\ptr
8506 + .endm
8507 + .macro pax_set_fptr_mask
8508 + .endm
8509 +#endif
8510 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8511 + .macro pax_force_retaddr rip=0, reload=0
8512 + .if \reload
8513 + pax_set_fptr_mask
8514 + .endif
8515 + orq %r10,\rip(%rsp)
8516 + .endm
8517 + .macro pax_force_fptr ptr
8518 + orq %r10,\ptr
8519 + .endm
8520 + .macro pax_set_fptr_mask
8521 + movabs $0x8000000000000000,%r10
8522 + .endm
8523 +#endif
8524 +#else
8525 + .macro pax_force_retaddr rip=0, reload=0
8526 + .endm
8527 + .macro pax_force_fptr ptr
8528 + .endm
8529 + .macro pax_force_retaddr_bts rip=0
8530 + .endm
8531 + .macro pax_set_fptr_mask
8532 + .endm
8533 +#endif
8534 +
8535 #endif /* __ASSEMBLY__ */
8536 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8537 index c240efc..fdfadf3 100644
8538 --- a/arch/x86/include/asm/alternative.h
8539 +++ b/arch/x86/include/asm/alternative.h
8540 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8541 " .byte 662b-661b\n" /* sourcelen */ \
8542 " .byte 664f-663f\n" /* replacementlen */ \
8543 ".previous\n" \
8544 - ".section .altinstr_replacement, \"ax\"\n" \
8545 + ".section .altinstr_replacement, \"a\"\n" \
8546 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8547 ".previous"
8548
8549 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8550 index 474d80d..1f97d58 100644
8551 --- a/arch/x86/include/asm/apic.h
8552 +++ b/arch/x86/include/asm/apic.h
8553 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8554
8555 #ifdef CONFIG_X86_LOCAL_APIC
8556
8557 -extern unsigned int apic_verbosity;
8558 +extern int apic_verbosity;
8559 extern int local_apic_timer_c2_ok;
8560
8561 extern int disable_apic;
8562 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8563 index 20370c6..a2eb9b0 100644
8564 --- a/arch/x86/include/asm/apm.h
8565 +++ b/arch/x86/include/asm/apm.h
8566 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8567 __asm__ __volatile__(APM_DO_ZERO_SEGS
8568 "pushl %%edi\n\t"
8569 "pushl %%ebp\n\t"
8570 - "lcall *%%cs:apm_bios_entry\n\t"
8571 + "lcall *%%ss:apm_bios_entry\n\t"
8572 "setc %%al\n\t"
8573 "popl %%ebp\n\t"
8574 "popl %%edi\n\t"
8575 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8576 __asm__ __volatile__(APM_DO_ZERO_SEGS
8577 "pushl %%edi\n\t"
8578 "pushl %%ebp\n\t"
8579 - "lcall *%%cs:apm_bios_entry\n\t"
8580 + "lcall *%%ss:apm_bios_entry\n\t"
8581 "setc %%bl\n\t"
8582 "popl %%ebp\n\t"
8583 "popl %%edi\n\t"
8584 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8585 index dc5a667..939040c 100644
8586 --- a/arch/x86/include/asm/atomic_32.h
8587 +++ b/arch/x86/include/asm/atomic_32.h
8588 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8589 }
8590
8591 /**
8592 + * atomic_read_unchecked - read atomic variable
8593 + * @v: pointer of type atomic_unchecked_t
8594 + *
8595 + * Atomically reads the value of @v.
8596 + */
8597 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8598 +{
8599 + return v->counter;
8600 +}
8601 +
8602 +/**
8603 * atomic_set - set atomic variable
8604 * @v: pointer of type atomic_t
8605 * @i: required value
8606 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8607 }
8608
8609 /**
8610 + * atomic_set_unchecked - set atomic variable
8611 + * @v: pointer of type atomic_unchecked_t
8612 + * @i: required value
8613 + *
8614 + * Atomically sets the value of @v to @i.
8615 + */
8616 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8617 +{
8618 + v->counter = i;
8619 +}
8620 +
8621 +/**
8622 * atomic_add - add integer to atomic variable
8623 * @i: integer value to add
8624 * @v: pointer of type atomic_t
8625 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8626 */
8627 static inline void atomic_add(int i, atomic_t *v)
8628 {
8629 - asm volatile(LOCK_PREFIX "addl %1,%0"
8630 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8631 +
8632 +#ifdef CONFIG_PAX_REFCOUNT
8633 + "jno 0f\n"
8634 + LOCK_PREFIX "subl %1,%0\n"
8635 + "int $4\n0:\n"
8636 + _ASM_EXTABLE(0b, 0b)
8637 +#endif
8638 +
8639 + : "+m" (v->counter)
8640 + : "ir" (i));
8641 +}
8642 +
8643 +/**
8644 + * atomic_add_unchecked - add integer to atomic variable
8645 + * @i: integer value to add
8646 + * @v: pointer of type atomic_unchecked_t
8647 + *
8648 + * Atomically adds @i to @v.
8649 + */
8650 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8651 +{
8652 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8653 : "+m" (v->counter)
8654 : "ir" (i));
8655 }
8656 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8657 */
8658 static inline void atomic_sub(int i, atomic_t *v)
8659 {
8660 - asm volatile(LOCK_PREFIX "subl %1,%0"
8661 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8662 +
8663 +#ifdef CONFIG_PAX_REFCOUNT
8664 + "jno 0f\n"
8665 + LOCK_PREFIX "addl %1,%0\n"
8666 + "int $4\n0:\n"
8667 + _ASM_EXTABLE(0b, 0b)
8668 +#endif
8669 +
8670 + : "+m" (v->counter)
8671 + : "ir" (i));
8672 +}
8673 +
8674 +/**
8675 + * atomic_sub_unchecked - subtract integer from atomic variable
8676 + * @i: integer value to subtract
8677 + * @v: pointer of type atomic_unchecked_t
8678 + *
8679 + * Atomically subtracts @i from @v.
8680 + */
8681 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8682 +{
8683 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8684 : "+m" (v->counter)
8685 : "ir" (i));
8686 }
8687 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8688 {
8689 unsigned char c;
8690
8691 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8692 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8693 +
8694 +#ifdef CONFIG_PAX_REFCOUNT
8695 + "jno 0f\n"
8696 + LOCK_PREFIX "addl %2,%0\n"
8697 + "int $4\n0:\n"
8698 + _ASM_EXTABLE(0b, 0b)
8699 +#endif
8700 +
8701 + "sete %1\n"
8702 : "+m" (v->counter), "=qm" (c)
8703 : "ir" (i) : "memory");
8704 return c;
8705 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8706 */
8707 static inline void atomic_inc(atomic_t *v)
8708 {
8709 - asm volatile(LOCK_PREFIX "incl %0"
8710 + asm volatile(LOCK_PREFIX "incl %0\n"
8711 +
8712 +#ifdef CONFIG_PAX_REFCOUNT
8713 + "jno 0f\n"
8714 + LOCK_PREFIX "decl %0\n"
8715 + "int $4\n0:\n"
8716 + _ASM_EXTABLE(0b, 0b)
8717 +#endif
8718 +
8719 + : "+m" (v->counter));
8720 +}
8721 +
8722 +/**
8723 + * atomic_inc_unchecked - increment atomic variable
8724 + * @v: pointer of type atomic_unchecked_t
8725 + *
8726 + * Atomically increments @v by 1.
8727 + */
8728 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8729 +{
8730 + asm volatile(LOCK_PREFIX "incl %0\n"
8731 : "+m" (v->counter));
8732 }
8733
8734 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8735 */
8736 static inline void atomic_dec(atomic_t *v)
8737 {
8738 - asm volatile(LOCK_PREFIX "decl %0"
8739 + asm volatile(LOCK_PREFIX "decl %0\n"
8740 +
8741 +#ifdef CONFIG_PAX_REFCOUNT
8742 + "jno 0f\n"
8743 + LOCK_PREFIX "incl %0\n"
8744 + "int $4\n0:\n"
8745 + _ASM_EXTABLE(0b, 0b)
8746 +#endif
8747 +
8748 + : "+m" (v->counter));
8749 +}
8750 +
8751 +/**
8752 + * atomic_dec_unchecked - decrement atomic variable
8753 + * @v: pointer of type atomic_unchecked_t
8754 + *
8755 + * Atomically decrements @v by 1.
8756 + */
8757 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8758 +{
8759 + asm volatile(LOCK_PREFIX "decl %0\n"
8760 : "+m" (v->counter));
8761 }
8762
8763 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8764 {
8765 unsigned char c;
8766
8767 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8768 + asm volatile(LOCK_PREFIX "decl %0\n"
8769 +
8770 +#ifdef CONFIG_PAX_REFCOUNT
8771 + "jno 0f\n"
8772 + LOCK_PREFIX "incl %0\n"
8773 + "int $4\n0:\n"
8774 + _ASM_EXTABLE(0b, 0b)
8775 +#endif
8776 +
8777 + "sete %1\n"
8778 : "+m" (v->counter), "=qm" (c)
8779 : : "memory");
8780 return c != 0;
8781 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8782 {
8783 unsigned char c;
8784
8785 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8786 + asm volatile(LOCK_PREFIX "incl %0\n"
8787 +
8788 +#ifdef CONFIG_PAX_REFCOUNT
8789 + "jno 0f\n"
8790 + LOCK_PREFIX "decl %0\n"
8791 + "into\n0:\n"
8792 + _ASM_EXTABLE(0b, 0b)
8793 +#endif
8794 +
8795 + "sete %1\n"
8796 + : "+m" (v->counter), "=qm" (c)
8797 + : : "memory");
8798 + return c != 0;
8799 +}
8800 +
8801 +/**
8802 + * atomic_inc_and_test_unchecked - increment and test
8803 + * @v: pointer of type atomic_unchecked_t
8804 + *
8805 + * Atomically increments @v by 1
8806 + * and returns true if the result is zero, or false for all
8807 + * other cases.
8808 + */
8809 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8810 +{
8811 + unsigned char c;
8812 +
8813 + asm volatile(LOCK_PREFIX "incl %0\n"
8814 + "sete %1\n"
8815 : "+m" (v->counter), "=qm" (c)
8816 : : "memory");
8817 return c != 0;
8818 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8819 {
8820 unsigned char c;
8821
8822 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8823 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8824 +
8825 +#ifdef CONFIG_PAX_REFCOUNT
8826 + "jno 0f\n"
8827 + LOCK_PREFIX "subl %2,%0\n"
8828 + "int $4\n0:\n"
8829 + _ASM_EXTABLE(0b, 0b)
8830 +#endif
8831 +
8832 + "sets %1\n"
8833 : "+m" (v->counter), "=qm" (c)
8834 : "ir" (i) : "memory");
8835 return c;
8836 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8837 #endif
8838 /* Modern 486+ processor */
8839 __i = i;
8840 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
8841 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8842 +
8843 +#ifdef CONFIG_PAX_REFCOUNT
8844 + "jno 0f\n"
8845 + "movl %0, %1\n"
8846 + "int $4\n0:\n"
8847 + _ASM_EXTABLE(0b, 0b)
8848 +#endif
8849 +
8850 : "+r" (i), "+m" (v->counter)
8851 : : "memory");
8852 return i + __i;
8853 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8854 }
8855
8856 /**
8857 + * atomic_add_return_unchecked - add integer and return
8858 + * @v: pointer of type atomic_unchecked_t
8859 + * @i: integer value to add
8860 + *
8861 + * Atomically adds @i to @v and returns @i + @v
8862 + */
8863 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8864 +{
8865 + int __i;
8866 +#ifdef CONFIG_M386
8867 + unsigned long flags;
8868 + if (unlikely(boot_cpu_data.x86 <= 3))
8869 + goto no_xadd;
8870 +#endif
8871 + /* Modern 486+ processor */
8872 + __i = i;
8873 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
8874 + : "+r" (i), "+m" (v->counter)
8875 + : : "memory");
8876 + return i + __i;
8877 +
8878 +#ifdef CONFIG_M386
8879 +no_xadd: /* Legacy 386 processor */
8880 + local_irq_save(flags);
8881 + __i = atomic_read_unchecked(v);
8882 + atomic_set_unchecked(v, i + __i);
8883 + local_irq_restore(flags);
8884 + return i + __i;
8885 +#endif
8886 +}
8887 +
8888 +/**
8889 * atomic_sub_return - subtract integer and return
8890 * @v: pointer of type atomic_t
8891 * @i: integer value to subtract
8892 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8893 return cmpxchg(&v->counter, old, new);
8894 }
8895
8896 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8897 +{
8898 + return cmpxchg(&v->counter, old, new);
8899 +}
8900 +
8901 static inline int atomic_xchg(atomic_t *v, int new)
8902 {
8903 return xchg(&v->counter, new);
8904 }
8905
8906 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8907 +{
8908 + return xchg(&v->counter, new);
8909 +}
8910 +
8911 /**
8912 * atomic_add_unless - add unless the number is already a given value
8913 * @v: pointer of type atomic_t
8914 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8915 */
8916 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8917 {
8918 - int c, old;
8919 + int c, old, new;
8920 c = atomic_read(v);
8921 for (;;) {
8922 - if (unlikely(c == (u)))
8923 + if (unlikely(c == u))
8924 break;
8925 - old = atomic_cmpxchg((v), c, c + (a));
8926 +
8927 + asm volatile("addl %2,%0\n"
8928 +
8929 +#ifdef CONFIG_PAX_REFCOUNT
8930 + "jno 0f\n"
8931 + "subl %2,%0\n"
8932 + "int $4\n0:\n"
8933 + _ASM_EXTABLE(0b, 0b)
8934 +#endif
8935 +
8936 + : "=r" (new)
8937 + : "0" (c), "ir" (a));
8938 +
8939 + old = atomic_cmpxchg(v, c, new);
8940 if (likely(old == c))
8941 break;
8942 c = old;
8943 }
8944 - return c != (u);
8945 + return c != u;
8946 }
8947
8948 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8949
8950 #define atomic_inc_return(v) (atomic_add_return(1, v))
8951 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8952 +{
8953 + return atomic_add_return_unchecked(1, v);
8954 +}
8955 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8956
8957 /* These are x86-specific, used by some header files */
8958 @@ -266,9 +495,18 @@ typedef struct {
8959 u64 __aligned(8) counter;
8960 } atomic64_t;
8961
8962 +#ifdef CONFIG_PAX_REFCOUNT
8963 +typedef struct {
8964 + u64 __aligned(8) counter;
8965 +} atomic64_unchecked_t;
8966 +#else
8967 +typedef atomic64_t atomic64_unchecked_t;
8968 +#endif
8969 +
8970 #define ATOMIC64_INIT(val) { (val) }
8971
8972 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8973 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8974
8975 /**
8976 * atomic64_xchg - xchg atomic64 variable
8977 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8978 * the old value.
8979 */
8980 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8981 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8982
8983 /**
8984 * atomic64_set - set atomic64 variable
8985 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8986 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8987
8988 /**
8989 + * atomic64_unchecked_set - set atomic64 variable
8990 + * @ptr: pointer to type atomic64_unchecked_t
8991 + * @new_val: value to assign
8992 + *
8993 + * Atomically sets the value of @ptr to @new_val.
8994 + */
8995 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8996 +
8997 +/**
8998 * atomic64_read - read atomic64 variable
8999 * @ptr: pointer to type atomic64_t
9000 *
9001 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9002 return res;
9003 }
9004
9005 -extern u64 atomic64_read(atomic64_t *ptr);
9006 +/**
9007 + * atomic64_read_unchecked - read atomic64 variable
9008 + * @ptr: pointer to type atomic64_unchecked_t
9009 + *
9010 + * Atomically reads the value of @ptr and returns it.
9011 + */
9012 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9013 +{
9014 + u64 res;
9015 +
9016 + /*
9017 + * Note, we inline this atomic64_unchecked_t primitive because
9018 + * it only clobbers EAX/EDX and leaves the others
9019 + * untouched. We also (somewhat subtly) rely on the
9020 + * fact that cmpxchg8b returns the current 64-bit value
9021 + * of the memory location we are touching:
9022 + */
9023 + asm volatile(
9024 + "mov %%ebx, %%eax\n\t"
9025 + "mov %%ecx, %%edx\n\t"
9026 + LOCK_PREFIX "cmpxchg8b %1\n"
9027 + : "=&A" (res)
9028 + : "m" (*ptr)
9029 + );
9030 +
9031 + return res;
9032 +}
9033
9034 /**
9035 * atomic64_add_return - add and return
9036 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9037 * Other variants with different arithmetic operators:
9038 */
9039 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9040 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9041 extern u64 atomic64_inc_return(atomic64_t *ptr);
9042 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9043 extern u64 atomic64_dec_return(atomic64_t *ptr);
9044 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9045
9046 /**
9047 * atomic64_add - add integer to atomic64 variable
9048 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9049 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9050
9051 /**
9052 + * atomic64_add_unchecked - add integer to atomic64 variable
9053 + * @delta: integer value to add
9054 + * @ptr: pointer to type atomic64_unchecked_t
9055 + *
9056 + * Atomically adds @delta to @ptr.
9057 + */
9058 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9059 +
9060 +/**
9061 * atomic64_sub - subtract the atomic64 variable
9062 * @delta: integer value to subtract
9063 * @ptr: pointer to type atomic64_t
9064 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9065 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9066
9067 /**
9068 + * atomic64_sub_unchecked - subtract the atomic64 variable
9069 + * @delta: integer value to subtract
9070 + * @ptr: pointer to type atomic64_unchecked_t
9071 + *
9072 + * Atomically subtracts @delta from @ptr.
9073 + */
9074 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9075 +
9076 +/**
9077 * atomic64_sub_and_test - subtract value from variable and test result
9078 * @delta: integer value to subtract
9079 * @ptr: pointer to type atomic64_t
9080 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9081 extern void atomic64_inc(atomic64_t *ptr);
9082
9083 /**
9084 + * atomic64_inc_unchecked - increment atomic64 variable
9085 + * @ptr: pointer to type atomic64_unchecked_t
9086 + *
9087 + * Atomically increments @ptr by 1.
9088 + */
9089 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9090 +
9091 +/**
9092 * atomic64_dec - decrement atomic64 variable
9093 * @ptr: pointer to type atomic64_t
9094 *
9095 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9096 extern void atomic64_dec(atomic64_t *ptr);
9097
9098 /**
9099 + * atomic64_dec_unchecked - decrement atomic64 variable
9100 + * @ptr: pointer to type atomic64_unchecked_t
9101 + *
9102 + * Atomically decrements @ptr by 1.
9103 + */
9104 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9105 +
9106 +/**
9107 * atomic64_dec_and_test - decrement and test
9108 * @ptr: pointer to type atomic64_t
9109 *
9110 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9111 index d605dc2..fafd7bd 100644
9112 --- a/arch/x86/include/asm/atomic_64.h
9113 +++ b/arch/x86/include/asm/atomic_64.h
9114 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9115 }
9116
9117 /**
9118 + * atomic_read_unchecked - read atomic variable
9119 + * @v: pointer of type atomic_unchecked_t
9120 + *
9121 + * Atomically reads the value of @v.
9122 + */
9123 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9124 +{
9125 + return v->counter;
9126 +}
9127 +
9128 +/**
9129 * atomic_set - set atomic variable
9130 * @v: pointer of type atomic_t
9131 * @i: required value
9132 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9133 }
9134
9135 /**
9136 + * atomic_set_unchecked - set atomic variable
9137 + * @v: pointer of type atomic_unchecked_t
9138 + * @i: required value
9139 + *
9140 + * Atomically sets the value of @v to @i.
9141 + */
9142 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9143 +{
9144 + v->counter = i;
9145 +}
9146 +
9147 +/**
9148 * atomic_add - add integer to atomic variable
9149 * @i: integer value to add
9150 * @v: pointer of type atomic_t
9151 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9152 */
9153 static inline void atomic_add(int i, atomic_t *v)
9154 {
9155 - asm volatile(LOCK_PREFIX "addl %1,%0"
9156 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9157 +
9158 +#ifdef CONFIG_PAX_REFCOUNT
9159 + "jno 0f\n"
9160 + LOCK_PREFIX "subl %1,%0\n"
9161 + "int $4\n0:\n"
9162 + _ASM_EXTABLE(0b, 0b)
9163 +#endif
9164 +
9165 + : "=m" (v->counter)
9166 + : "ir" (i), "m" (v->counter));
9167 +}
9168 +
9169 +/**
9170 + * atomic_add_unchecked - add integer to atomic variable
9171 + * @i: integer value to add
9172 + * @v: pointer of type atomic_unchecked_t
9173 + *
9174 + * Atomically adds @i to @v.
9175 + */
9176 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9177 +{
9178 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9179 : "=m" (v->counter)
9180 : "ir" (i), "m" (v->counter));
9181 }
9182 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9183 */
9184 static inline void atomic_sub(int i, atomic_t *v)
9185 {
9186 - asm volatile(LOCK_PREFIX "subl %1,%0"
9187 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9188 +
9189 +#ifdef CONFIG_PAX_REFCOUNT
9190 + "jno 0f\n"
9191 + LOCK_PREFIX "addl %1,%0\n"
9192 + "int $4\n0:\n"
9193 + _ASM_EXTABLE(0b, 0b)
9194 +#endif
9195 +
9196 + : "=m" (v->counter)
9197 + : "ir" (i), "m" (v->counter));
9198 +}
9199 +
9200 +/**
9201 + * atomic_sub_unchecked - subtract the atomic variable
9202 + * @i: integer value to subtract
9203 + * @v: pointer of type atomic_unchecked_t
9204 + *
9205 + * Atomically subtracts @i from @v.
9206 + */
9207 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9208 +{
9209 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9210 : "=m" (v->counter)
9211 : "ir" (i), "m" (v->counter));
9212 }
9213 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9214 {
9215 unsigned char c;
9216
9217 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9218 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9219 +
9220 +#ifdef CONFIG_PAX_REFCOUNT
9221 + "jno 0f\n"
9222 + LOCK_PREFIX "addl %2,%0\n"
9223 + "int $4\n0:\n"
9224 + _ASM_EXTABLE(0b, 0b)
9225 +#endif
9226 +
9227 + "sete %1\n"
9228 : "=m" (v->counter), "=qm" (c)
9229 : "ir" (i), "m" (v->counter) : "memory");
9230 return c;
9231 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9232 */
9233 static inline void atomic_inc(atomic_t *v)
9234 {
9235 - asm volatile(LOCK_PREFIX "incl %0"
9236 + asm volatile(LOCK_PREFIX "incl %0\n"
9237 +
9238 +#ifdef CONFIG_PAX_REFCOUNT
9239 + "jno 0f\n"
9240 + LOCK_PREFIX "decl %0\n"
9241 + "int $4\n0:\n"
9242 + _ASM_EXTABLE(0b, 0b)
9243 +#endif
9244 +
9245 + : "=m" (v->counter)
9246 + : "m" (v->counter));
9247 +}
9248 +
9249 +/**
9250 + * atomic_inc_unchecked - increment atomic variable
9251 + * @v: pointer of type atomic_unchecked_t
9252 + *
9253 + * Atomically increments @v by 1.
9254 + */
9255 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9256 +{
9257 + asm volatile(LOCK_PREFIX "incl %0\n"
9258 : "=m" (v->counter)
9259 : "m" (v->counter));
9260 }
9261 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9262 */
9263 static inline void atomic_dec(atomic_t *v)
9264 {
9265 - asm volatile(LOCK_PREFIX "decl %0"
9266 + asm volatile(LOCK_PREFIX "decl %0\n"
9267 +
9268 +#ifdef CONFIG_PAX_REFCOUNT
9269 + "jno 0f\n"
9270 + LOCK_PREFIX "incl %0\n"
9271 + "int $4\n0:\n"
9272 + _ASM_EXTABLE(0b, 0b)
9273 +#endif
9274 +
9275 + : "=m" (v->counter)
9276 + : "m" (v->counter));
9277 +}
9278 +
9279 +/**
9280 + * atomic_dec_unchecked - decrement atomic variable
9281 + * @v: pointer of type atomic_unchecked_t
9282 + *
9283 + * Atomically decrements @v by 1.
9284 + */
9285 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9286 +{
9287 + asm volatile(LOCK_PREFIX "decl %0\n"
9288 : "=m" (v->counter)
9289 : "m" (v->counter));
9290 }
9291 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9292 {
9293 unsigned char c;
9294
9295 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9296 + asm volatile(LOCK_PREFIX "decl %0\n"
9297 +
9298 +#ifdef CONFIG_PAX_REFCOUNT
9299 + "jno 0f\n"
9300 + LOCK_PREFIX "incl %0\n"
9301 + "int $4\n0:\n"
9302 + _ASM_EXTABLE(0b, 0b)
9303 +#endif
9304 +
9305 + "sete %1\n"
9306 : "=m" (v->counter), "=qm" (c)
9307 : "m" (v->counter) : "memory");
9308 return c != 0;
9309 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9310 {
9311 unsigned char c;
9312
9313 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9314 + asm volatile(LOCK_PREFIX "incl %0\n"
9315 +
9316 +#ifdef CONFIG_PAX_REFCOUNT
9317 + "jno 0f\n"
9318 + LOCK_PREFIX "decl %0\n"
9319 + "int $4\n0:\n"
9320 + _ASM_EXTABLE(0b, 0b)
9321 +#endif
9322 +
9323 + "sete %1\n"
9324 + : "=m" (v->counter), "=qm" (c)
9325 + : "m" (v->counter) : "memory");
9326 + return c != 0;
9327 +}
9328 +
9329 +/**
9330 + * atomic_inc_and_test_unchecked - increment and test
9331 + * @v: pointer of type atomic_unchecked_t
9332 + *
9333 + * Atomically increments @v by 1
9334 + * and returns true if the result is zero, or false for all
9335 + * other cases.
9336 + */
9337 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9338 +{
9339 + unsigned char c;
9340 +
9341 + asm volatile(LOCK_PREFIX "incl %0\n"
9342 + "sete %1\n"
9343 : "=m" (v->counter), "=qm" (c)
9344 : "m" (v->counter) : "memory");
9345 return c != 0;
9346 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9347 {
9348 unsigned char c;
9349
9350 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9351 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9352 +
9353 +#ifdef CONFIG_PAX_REFCOUNT
9354 + "jno 0f\n"
9355 + LOCK_PREFIX "subl %2,%0\n"
9356 + "int $4\n0:\n"
9357 + _ASM_EXTABLE(0b, 0b)
9358 +#endif
9359 +
9360 + "sets %1\n"
9361 : "=m" (v->counter), "=qm" (c)
9362 : "ir" (i), "m" (v->counter) : "memory");
9363 return c;
9364 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9365 static inline int atomic_add_return(int i, atomic_t *v)
9366 {
9367 int __i = i;
9368 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9369 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9370 +
9371 +#ifdef CONFIG_PAX_REFCOUNT
9372 + "jno 0f\n"
9373 + "movl %0, %1\n"
9374 + "int $4\n0:\n"
9375 + _ASM_EXTABLE(0b, 0b)
9376 +#endif
9377 +
9378 + : "+r" (i), "+m" (v->counter)
9379 + : : "memory");
9380 + return i + __i;
9381 +}
9382 +
9383 +/**
9384 + * atomic_add_return_unchecked - add and return
9385 + * @i: integer value to add
9386 + * @v: pointer of type atomic_unchecked_t
9387 + *
9388 + * Atomically adds @i to @v and returns @i + @v
9389 + */
9390 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9391 +{
9392 + int __i = i;
9393 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9394 : "+r" (i), "+m" (v->counter)
9395 : : "memory");
9396 return i + __i;
9397 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9398 }
9399
9400 #define atomic_inc_return(v) (atomic_add_return(1, v))
9401 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9402 +{
9403 + return atomic_add_return_unchecked(1, v);
9404 +}
9405 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9406
9407 /* The 64-bit atomic type */
9408 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9409 }
9410
9411 /**
9412 + * atomic64_read_unchecked - read atomic64 variable
9413 + * @v: pointer of type atomic64_unchecked_t
9414 + *
9415 + * Atomically reads the value of @v.
9416 + * Doesn't imply a read memory barrier.
9417 + */
9418 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9419 +{
9420 + return v->counter;
9421 +}
9422 +
9423 +/**
9424 * atomic64_set - set atomic64 variable
9425 * @v: pointer to type atomic64_t
9426 * @i: required value
9427 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9428 }
9429
9430 /**
9431 + * atomic64_set_unchecked - set atomic64 variable
9432 + * @v: pointer to type atomic64_unchecked_t
9433 + * @i: required value
9434 + *
9435 + * Atomically sets the value of @v to @i.
9436 + */
9437 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9438 +{
9439 + v->counter = i;
9440 +}
9441 +
9442 +/**
9443 * atomic64_add - add integer to atomic64 variable
9444 * @i: integer value to add
9445 * @v: pointer to type atomic64_t
9446 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9447 */
9448 static inline void atomic64_add(long i, atomic64_t *v)
9449 {
9450 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9451 +
9452 +#ifdef CONFIG_PAX_REFCOUNT
9453 + "jno 0f\n"
9454 + LOCK_PREFIX "subq %1,%0\n"
9455 + "int $4\n0:\n"
9456 + _ASM_EXTABLE(0b, 0b)
9457 +#endif
9458 +
9459 + : "=m" (v->counter)
9460 + : "er" (i), "m" (v->counter));
9461 +}
9462 +
9463 +/**
9464 + * atomic64_add_unchecked - add integer to atomic64 variable
9465 + * @i: integer value to add
9466 + * @v: pointer to type atomic64_unchecked_t
9467 + *
9468 + * Atomically adds @i to @v.
9469 + */
9470 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9471 +{
9472 asm volatile(LOCK_PREFIX "addq %1,%0"
9473 : "=m" (v->counter)
9474 : "er" (i), "m" (v->counter));
9475 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9476 */
9477 static inline void atomic64_sub(long i, atomic64_t *v)
9478 {
9479 - asm volatile(LOCK_PREFIX "subq %1,%0"
9480 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9481 +
9482 +#ifdef CONFIG_PAX_REFCOUNT
9483 + "jno 0f\n"
9484 + LOCK_PREFIX "addq %1,%0\n"
9485 + "int $4\n0:\n"
9486 + _ASM_EXTABLE(0b, 0b)
9487 +#endif
9488 +
9489 : "=m" (v->counter)
9490 : "er" (i), "m" (v->counter));
9491 }
9492 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9493 {
9494 unsigned char c;
9495
9496 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9497 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9498 +
9499 +#ifdef CONFIG_PAX_REFCOUNT
9500 + "jno 0f\n"
9501 + LOCK_PREFIX "addq %2,%0\n"
9502 + "int $4\n0:\n"
9503 + _ASM_EXTABLE(0b, 0b)
9504 +#endif
9505 +
9506 + "sete %1\n"
9507 : "=m" (v->counter), "=qm" (c)
9508 : "er" (i), "m" (v->counter) : "memory");
9509 return c;
9510 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9511 */
9512 static inline void atomic64_inc(atomic64_t *v)
9513 {
9514 + asm volatile(LOCK_PREFIX "incq %0\n"
9515 +
9516 +#ifdef CONFIG_PAX_REFCOUNT
9517 + "jno 0f\n"
9518 + LOCK_PREFIX "decq %0\n"
9519 + "int $4\n0:\n"
9520 + _ASM_EXTABLE(0b, 0b)
9521 +#endif
9522 +
9523 + : "=m" (v->counter)
9524 + : "m" (v->counter));
9525 +}
9526 +
9527 +/**
9528 + * atomic64_inc_unchecked - increment atomic64 variable
9529 + * @v: pointer to type atomic64_unchecked_t
9530 + *
9531 + * Atomically increments @v by 1.
9532 + */
9533 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9534 +{
9535 asm volatile(LOCK_PREFIX "incq %0"
9536 : "=m" (v->counter)
9537 : "m" (v->counter));
9538 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9539 */
9540 static inline void atomic64_dec(atomic64_t *v)
9541 {
9542 - asm volatile(LOCK_PREFIX "decq %0"
9543 + asm volatile(LOCK_PREFIX "decq %0\n"
9544 +
9545 +#ifdef CONFIG_PAX_REFCOUNT
9546 + "jno 0f\n"
9547 + LOCK_PREFIX "incq %0\n"
9548 + "int $4\n0:\n"
9549 + _ASM_EXTABLE(0b, 0b)
9550 +#endif
9551 +
9552 + : "=m" (v->counter)
9553 + : "m" (v->counter));
9554 +}
9555 +
9556 +/**
9557 + * atomic64_dec_unchecked - decrement atomic64 variable
9558 + * @v: pointer to type atomic64_t
9559 + *
9560 + * Atomically decrements @v by 1.
9561 + */
9562 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9563 +{
9564 + asm volatile(LOCK_PREFIX "decq %0\n"
9565 : "=m" (v->counter)
9566 : "m" (v->counter));
9567 }
9568 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9569 {
9570 unsigned char c;
9571
9572 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9573 + asm volatile(LOCK_PREFIX "decq %0\n"
9574 +
9575 +#ifdef CONFIG_PAX_REFCOUNT
9576 + "jno 0f\n"
9577 + LOCK_PREFIX "incq %0\n"
9578 + "int $4\n0:\n"
9579 + _ASM_EXTABLE(0b, 0b)
9580 +#endif
9581 +
9582 + "sete %1\n"
9583 : "=m" (v->counter), "=qm" (c)
9584 : "m" (v->counter) : "memory");
9585 return c != 0;
9586 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9587 {
9588 unsigned char c;
9589
9590 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9591 + asm volatile(LOCK_PREFIX "incq %0\n"
9592 +
9593 +#ifdef CONFIG_PAX_REFCOUNT
9594 + "jno 0f\n"
9595 + LOCK_PREFIX "decq %0\n"
9596 + "int $4\n0:\n"
9597 + _ASM_EXTABLE(0b, 0b)
9598 +#endif
9599 +
9600 + "sete %1\n"
9601 : "=m" (v->counter), "=qm" (c)
9602 : "m" (v->counter) : "memory");
9603 return c != 0;
9604 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9605 {
9606 unsigned char c;
9607
9608 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9609 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9610 +
9611 +#ifdef CONFIG_PAX_REFCOUNT
9612 + "jno 0f\n"
9613 + LOCK_PREFIX "subq %2,%0\n"
9614 + "int $4\n0:\n"
9615 + _ASM_EXTABLE(0b, 0b)
9616 +#endif
9617 +
9618 + "sets %1\n"
9619 : "=m" (v->counter), "=qm" (c)
9620 : "er" (i), "m" (v->counter) : "memory");
9621 return c;
9622 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9623 static inline long atomic64_add_return(long i, atomic64_t *v)
9624 {
9625 long __i = i;
9626 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9627 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9628 +
9629 +#ifdef CONFIG_PAX_REFCOUNT
9630 + "jno 0f\n"
9631 + "movq %0, %1\n"
9632 + "int $4\n0:\n"
9633 + _ASM_EXTABLE(0b, 0b)
9634 +#endif
9635 +
9636 + : "+r" (i), "+m" (v->counter)
9637 + : : "memory");
9638 + return i + __i;
9639 +}
9640 +
9641 +/**
9642 + * atomic64_add_return_unchecked - add and return
9643 + * @i: integer value to add
9644 + * @v: pointer to type atomic64_unchecked_t
9645 + *
9646 + * Atomically adds @i to @v and returns @i + @v
9647 + */
9648 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9649 +{
9650 + long __i = i;
9651 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9652 : "+r" (i), "+m" (v->counter)
9653 : : "memory");
9654 return i + __i;
9655 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9656 }
9657
9658 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9659 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9660 +{
9661 + return atomic64_add_return_unchecked(1, v);
9662 +}
9663 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9664
9665 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9666 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9667 return cmpxchg(&v->counter, old, new);
9668 }
9669
9670 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9671 +{
9672 + return cmpxchg(&v->counter, old, new);
9673 +}
9674 +
9675 static inline long atomic64_xchg(atomic64_t *v, long new)
9676 {
9677 return xchg(&v->counter, new);
9678 }
9679
9680 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9681 +{
9682 + return xchg(&v->counter, new);
9683 +}
9684 +
9685 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9686 {
9687 return cmpxchg(&v->counter, old, new);
9688 }
9689
9690 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9691 +{
9692 + return cmpxchg(&v->counter, old, new);
9693 +}
9694 +
9695 static inline long atomic_xchg(atomic_t *v, int new)
9696 {
9697 return xchg(&v->counter, new);
9698 }
9699
9700 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9701 +{
9702 + return xchg(&v->counter, new);
9703 +}
9704 +
9705 /**
9706 * atomic_add_unless - add unless the number is a given value
9707 * @v: pointer of type atomic_t
9708 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9709 */
9710 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9711 {
9712 - int c, old;
9713 + int c, old, new;
9714 c = atomic_read(v);
9715 for (;;) {
9716 - if (unlikely(c == (u)))
9717 + if (unlikely(c == u))
9718 break;
9719 - old = atomic_cmpxchg((v), c, c + (a));
9720 +
9721 + asm volatile("addl %2,%0\n"
9722 +
9723 +#ifdef CONFIG_PAX_REFCOUNT
9724 + "jno 0f\n"
9725 + "subl %2,%0\n"
9726 + "int $4\n0:\n"
9727 + _ASM_EXTABLE(0b, 0b)
9728 +#endif
9729 +
9730 + : "=r" (new)
9731 + : "0" (c), "ir" (a));
9732 +
9733 + old = atomic_cmpxchg(v, c, new);
9734 if (likely(old == c))
9735 break;
9736 c = old;
9737 }
9738 - return c != (u);
9739 + return c != u;
9740 }
9741
9742 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9743 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9744 */
9745 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9746 {
9747 - long c, old;
9748 + long c, old, new;
9749 c = atomic64_read(v);
9750 for (;;) {
9751 - if (unlikely(c == (u)))
9752 + if (unlikely(c == u))
9753 break;
9754 - old = atomic64_cmpxchg((v), c, c + (a));
9755 +
9756 + asm volatile("addq %2,%0\n"
9757 +
9758 +#ifdef CONFIG_PAX_REFCOUNT
9759 + "jno 0f\n"
9760 + "subq %2,%0\n"
9761 + "int $4\n0:\n"
9762 + _ASM_EXTABLE(0b, 0b)
9763 +#endif
9764 +
9765 + : "=r" (new)
9766 + : "0" (c), "er" (a));
9767 +
9768 + old = atomic64_cmpxchg(v, c, new);
9769 if (likely(old == c))
9770 break;
9771 c = old;
9772 }
9773 - return c != (u);
9774 + return c != u;
9775 }
9776
9777 /**
9778 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9779 index 02b47a6..d5c4b15 100644
9780 --- a/arch/x86/include/asm/bitops.h
9781 +++ b/arch/x86/include/asm/bitops.h
9782 @@ -38,7 +38,7 @@
9783 * a mask operation on a byte.
9784 */
9785 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9786 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9787 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9788 #define CONST_MASK(nr) (1 << ((nr) & 7))
9789
9790 /**
9791 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9792 index 7a10659..8bbf355 100644
9793 --- a/arch/x86/include/asm/boot.h
9794 +++ b/arch/x86/include/asm/boot.h
9795 @@ -11,10 +11,15 @@
9796 #include <asm/pgtable_types.h>
9797
9798 /* Physical address where kernel should be loaded. */
9799 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9800 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9801 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9802 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9803
9804 +#ifndef __ASSEMBLY__
9805 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9806 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9807 +#endif
9808 +
9809 /* Minimum kernel alignment, as a power of two */
9810 #ifdef CONFIG_X86_64
9811 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9812 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9813 index 549860d..7d45f68 100644
9814 --- a/arch/x86/include/asm/cache.h
9815 +++ b/arch/x86/include/asm/cache.h
9816 @@ -5,9 +5,10 @@
9817
9818 /* L1 cache line size */
9819 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9820 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9821 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9822
9823 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9824 +#define __read_only __attribute__((__section__(".data.read_only")))
9825
9826 #ifdef CONFIG_X86_VSMP
9827 /* vSMP Internode cacheline shift */
9828 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9829 index b54f6af..5b376a6 100644
9830 --- a/arch/x86/include/asm/cacheflush.h
9831 +++ b/arch/x86/include/asm/cacheflush.h
9832 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9833 static inline unsigned long get_page_memtype(struct page *pg)
9834 {
9835 if (!PageUncached(pg) && !PageWC(pg))
9836 - return -1;
9837 + return ~0UL;
9838 else if (!PageUncached(pg) && PageWC(pg))
9839 return _PAGE_CACHE_WC;
9840 else if (PageUncached(pg) && !PageWC(pg))
9841 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9842 SetPageWC(pg);
9843 break;
9844 default:
9845 - case -1:
9846 + case ~0UL:
9847 ClearPageUncached(pg);
9848 ClearPageWC(pg);
9849 break;
9850 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9851 index 0e63c9a..ab8d972 100644
9852 --- a/arch/x86/include/asm/calling.h
9853 +++ b/arch/x86/include/asm/calling.h
9854 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9855 * for assembly code:
9856 */
9857
9858 -#define R15 0
9859 -#define R14 8
9860 -#define R13 16
9861 -#define R12 24
9862 -#define RBP 32
9863 -#define RBX 40
9864 +#define R15 (0)
9865 +#define R14 (8)
9866 +#define R13 (16)
9867 +#define R12 (24)
9868 +#define RBP (32)
9869 +#define RBX (40)
9870
9871 /* arguments: interrupts/non tracing syscalls only save up to here: */
9872 -#define R11 48
9873 -#define R10 56
9874 -#define R9 64
9875 -#define R8 72
9876 -#define RAX 80
9877 -#define RCX 88
9878 -#define RDX 96
9879 -#define RSI 104
9880 -#define RDI 112
9881 -#define ORIG_RAX 120 /* + error_code */
9882 +#define R11 (48)
9883 +#define R10 (56)
9884 +#define R9 (64)
9885 +#define R8 (72)
9886 +#define RAX (80)
9887 +#define RCX (88)
9888 +#define RDX (96)
9889 +#define RSI (104)
9890 +#define RDI (112)
9891 +#define ORIG_RAX (120) /* + error_code */
9892 /* end of arguments */
9893
9894 /* cpu exception frame or undefined in case of fast syscall: */
9895 -#define RIP 128
9896 -#define CS 136
9897 -#define EFLAGS 144
9898 -#define RSP 152
9899 -#define SS 160
9900 +#define RIP (128)
9901 +#define CS (136)
9902 +#define EFLAGS (144)
9903 +#define RSP (152)
9904 +#define SS (160)
9905
9906 #define ARGOFFSET R11
9907 #define SWFRAME ORIG_RAX
9908 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9909 index 46fc474..b02b0f9 100644
9910 --- a/arch/x86/include/asm/checksum_32.h
9911 +++ b/arch/x86/include/asm/checksum_32.h
9912 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9913 int len, __wsum sum,
9914 int *src_err_ptr, int *dst_err_ptr);
9915
9916 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9917 + int len, __wsum sum,
9918 + int *src_err_ptr, int *dst_err_ptr);
9919 +
9920 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9921 + int len, __wsum sum,
9922 + int *src_err_ptr, int *dst_err_ptr);
9923 +
9924 /*
9925 * Note: when you get a NULL pointer exception here this means someone
9926 * passed in an incorrect kernel address to one of these functions.
9927 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9928 int *err_ptr)
9929 {
9930 might_sleep();
9931 - return csum_partial_copy_generic((__force void *)src, dst,
9932 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9933 len, sum, err_ptr, NULL);
9934 }
9935
9936 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9937 {
9938 might_sleep();
9939 if (access_ok(VERIFY_WRITE, dst, len))
9940 - return csum_partial_copy_generic(src, (__force void *)dst,
9941 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9942 len, sum, NULL, err_ptr);
9943
9944 if (len)
9945 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9946 index 617bd56..7b047a1 100644
9947 --- a/arch/x86/include/asm/desc.h
9948 +++ b/arch/x86/include/asm/desc.h
9949 @@ -4,6 +4,7 @@
9950 #include <asm/desc_defs.h>
9951 #include <asm/ldt.h>
9952 #include <asm/mmu.h>
9953 +#include <asm/pgtable.h>
9954 #include <linux/smp.h>
9955
9956 static inline void fill_ldt(struct desc_struct *desc,
9957 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9958 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9959 desc->type = (info->read_exec_only ^ 1) << 1;
9960 desc->type |= info->contents << 2;
9961 + desc->type |= info->seg_not_present ^ 1;
9962 desc->s = 1;
9963 desc->dpl = 0x3;
9964 desc->p = info->seg_not_present ^ 1;
9965 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9966 }
9967
9968 extern struct desc_ptr idt_descr;
9969 -extern gate_desc idt_table[];
9970 -
9971 -struct gdt_page {
9972 - struct desc_struct gdt[GDT_ENTRIES];
9973 -} __attribute__((aligned(PAGE_SIZE)));
9974 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9975 +extern gate_desc idt_table[256];
9976
9977 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9978 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9979 {
9980 - return per_cpu(gdt_page, cpu).gdt;
9981 + return cpu_gdt_table[cpu];
9982 }
9983
9984 #ifdef CONFIG_X86_64
9985 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9986 unsigned long base, unsigned dpl, unsigned flags,
9987 unsigned short seg)
9988 {
9989 - gate->a = (seg << 16) | (base & 0xffff);
9990 - gate->b = (base & 0xffff0000) |
9991 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9992 + gate->gate.offset_low = base;
9993 + gate->gate.seg = seg;
9994 + gate->gate.reserved = 0;
9995 + gate->gate.type = type;
9996 + gate->gate.s = 0;
9997 + gate->gate.dpl = dpl;
9998 + gate->gate.p = 1;
9999 + gate->gate.offset_high = base >> 16;
10000 }
10001
10002 #endif
10003 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10004 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10005 const gate_desc *gate)
10006 {
10007 + pax_open_kernel();
10008 memcpy(&idt[entry], gate, sizeof(*gate));
10009 + pax_close_kernel();
10010 }
10011
10012 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10013 const void *desc)
10014 {
10015 + pax_open_kernel();
10016 memcpy(&ldt[entry], desc, 8);
10017 + pax_close_kernel();
10018 }
10019
10020 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10021 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10022 size = sizeof(struct desc_struct);
10023 break;
10024 }
10025 +
10026 + pax_open_kernel();
10027 memcpy(&gdt[entry], desc, size);
10028 + pax_close_kernel();
10029 }
10030
10031 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10032 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10033
10034 static inline void native_load_tr_desc(void)
10035 {
10036 + pax_open_kernel();
10037 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10038 + pax_close_kernel();
10039 }
10040
10041 static inline void native_load_gdt(const struct desc_ptr *dtr)
10042 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10043 unsigned int i;
10044 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10045
10046 + pax_open_kernel();
10047 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10048 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10049 + pax_close_kernel();
10050 }
10051
10052 #define _LDT_empty(info) \
10053 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10054 desc->limit = (limit >> 16) & 0xf;
10055 }
10056
10057 -static inline void _set_gate(int gate, unsigned type, void *addr,
10058 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10059 unsigned dpl, unsigned ist, unsigned seg)
10060 {
10061 gate_desc s;
10062 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10063 * Pentium F0 0F bugfix can have resulted in the mapped
10064 * IDT being write-protected.
10065 */
10066 -static inline void set_intr_gate(unsigned int n, void *addr)
10067 +static inline void set_intr_gate(unsigned int n, const void *addr)
10068 {
10069 BUG_ON((unsigned)n > 0xFF);
10070 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10071 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10072 /*
10073 * This routine sets up an interrupt gate at directory privilege level 3.
10074 */
10075 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10076 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10077 {
10078 BUG_ON((unsigned)n > 0xFF);
10079 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10080 }
10081
10082 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10083 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10084 {
10085 BUG_ON((unsigned)n > 0xFF);
10086 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10087 }
10088
10089 -static inline void set_trap_gate(unsigned int n, void *addr)
10090 +static inline void set_trap_gate(unsigned int n, const void *addr)
10091 {
10092 BUG_ON((unsigned)n > 0xFF);
10093 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10094 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10095 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10096 {
10097 BUG_ON((unsigned)n > 0xFF);
10098 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10099 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10100 }
10101
10102 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10103 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10104 {
10105 BUG_ON((unsigned)n > 0xFF);
10106 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10107 }
10108
10109 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10110 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10111 {
10112 BUG_ON((unsigned)n > 0xFF);
10113 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10114 }
10115
10116 +#ifdef CONFIG_X86_32
10117 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10118 +{
10119 + struct desc_struct d;
10120 +
10121 + if (likely(limit))
10122 + limit = (limit - 1UL) >> PAGE_SHIFT;
10123 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10124 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10125 +}
10126 +#endif
10127 +
10128 #endif /* _ASM_X86_DESC_H */
10129 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10130 index 9d66848..6b4a691 100644
10131 --- a/arch/x86/include/asm/desc_defs.h
10132 +++ b/arch/x86/include/asm/desc_defs.h
10133 @@ -31,6 +31,12 @@ struct desc_struct {
10134 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10135 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10136 };
10137 + struct {
10138 + u16 offset_low;
10139 + u16 seg;
10140 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10141 + unsigned offset_high: 16;
10142 + } gate;
10143 };
10144 } __attribute__((packed));
10145
10146 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10147 index cee34e9..a7c3fa2 100644
10148 --- a/arch/x86/include/asm/device.h
10149 +++ b/arch/x86/include/asm/device.h
10150 @@ -6,7 +6,7 @@ struct dev_archdata {
10151 void *acpi_handle;
10152 #endif
10153 #ifdef CONFIG_X86_64
10154 -struct dma_map_ops *dma_ops;
10155 + const struct dma_map_ops *dma_ops;
10156 #endif
10157 #ifdef CONFIG_DMAR
10158 void *iommu; /* hook for IOMMU specific extension */
10159 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10160 index 6a25d5d..786b202 100644
10161 --- a/arch/x86/include/asm/dma-mapping.h
10162 +++ b/arch/x86/include/asm/dma-mapping.h
10163 @@ -25,9 +25,9 @@ extern int iommu_merge;
10164 extern struct device x86_dma_fallback_dev;
10165 extern int panic_on_overflow;
10166
10167 -extern struct dma_map_ops *dma_ops;
10168 +extern const struct dma_map_ops *dma_ops;
10169
10170 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10171 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10172 {
10173 #ifdef CONFIG_X86_32
10174 return dma_ops;
10175 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10176 /* Make sure we keep the same behaviour */
10177 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10178 {
10179 - struct dma_map_ops *ops = get_dma_ops(dev);
10180 + const struct dma_map_ops *ops = get_dma_ops(dev);
10181 if (ops->mapping_error)
10182 return ops->mapping_error(dev, dma_addr);
10183
10184 @@ -122,7 +122,7 @@ static inline void *
10185 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10186 gfp_t gfp)
10187 {
10188 - struct dma_map_ops *ops = get_dma_ops(dev);
10189 + const struct dma_map_ops *ops = get_dma_ops(dev);
10190 void *memory;
10191
10192 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10193 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10194 static inline void dma_free_coherent(struct device *dev, size_t size,
10195 void *vaddr, dma_addr_t bus)
10196 {
10197 - struct dma_map_ops *ops = get_dma_ops(dev);
10198 + const struct dma_map_ops *ops = get_dma_ops(dev);
10199
10200 WARN_ON(irqs_disabled()); /* for portability */
10201
10202 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10203 index 40b4e61..40d8133 100644
10204 --- a/arch/x86/include/asm/e820.h
10205 +++ b/arch/x86/include/asm/e820.h
10206 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10207 #define ISA_END_ADDRESS 0x100000
10208 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10209
10210 -#define BIOS_BEGIN 0x000a0000
10211 +#define BIOS_BEGIN 0x000c0000
10212 #define BIOS_END 0x00100000
10213
10214 #ifdef __KERNEL__
10215 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10216 index 8ac9d9a..0a6c96e 100644
10217 --- a/arch/x86/include/asm/elf.h
10218 +++ b/arch/x86/include/asm/elf.h
10219 @@ -257,7 +257,25 @@ extern int force_personality32;
10220 the loader. We need to make sure that it is out of the way of the program
10221 that it will "exec", and that there is sufficient room for the brk. */
10222
10223 +#ifdef CONFIG_PAX_SEGMEXEC
10224 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10225 +#else
10226 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10227 +#endif
10228 +
10229 +#ifdef CONFIG_PAX_ASLR
10230 +#ifdef CONFIG_X86_32
10231 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10232 +
10233 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10234 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10235 +#else
10236 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10237 +
10238 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10239 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10240 +#endif
10241 +#endif
10242
10243 /* This yields a mask that user programs can use to figure out what
10244 instruction set this CPU supports. This could be done in user space,
10245 @@ -310,9 +328,7 @@ do { \
10246
10247 #define ARCH_DLINFO \
10248 do { \
10249 - if (vdso_enabled) \
10250 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10251 - (unsigned long)current->mm->context.vdso); \
10252 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10253 } while (0)
10254
10255 #define AT_SYSINFO 32
10256 @@ -323,7 +339,7 @@ do { \
10257
10258 #endif /* !CONFIG_X86_32 */
10259
10260 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10261 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10262
10263 #define VDSO_ENTRY \
10264 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10265 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10266 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10267 #define compat_arch_setup_additional_pages syscall32_setup_pages
10268
10269 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10270 -#define arch_randomize_brk arch_randomize_brk
10271 -
10272 #endif /* _ASM_X86_ELF_H */
10273 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10274 index cc70c1c..d96d011 100644
10275 --- a/arch/x86/include/asm/emergency-restart.h
10276 +++ b/arch/x86/include/asm/emergency-restart.h
10277 @@ -15,6 +15,6 @@ enum reboot_type {
10278
10279 extern enum reboot_type reboot_type;
10280
10281 -extern void machine_emergency_restart(void);
10282 +extern void machine_emergency_restart(void) __noreturn;
10283
10284 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10285 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10286 index 1f11ce4..7caabd1 100644
10287 --- a/arch/x86/include/asm/futex.h
10288 +++ b/arch/x86/include/asm/futex.h
10289 @@ -12,16 +12,18 @@
10290 #include <asm/system.h>
10291
10292 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10293 + typecheck(u32 __user *, uaddr); \
10294 asm volatile("1:\t" insn "\n" \
10295 "2:\t.section .fixup,\"ax\"\n" \
10296 "3:\tmov\t%3, %1\n" \
10297 "\tjmp\t2b\n" \
10298 "\t.previous\n" \
10299 _ASM_EXTABLE(1b, 3b) \
10300 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10301 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10302 : "i" (-EFAULT), "0" (oparg), "1" (0))
10303
10304 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10305 + typecheck(u32 __user *, uaddr); \
10306 asm volatile("1:\tmovl %2, %0\n" \
10307 "\tmovl\t%0, %3\n" \
10308 "\t" insn "\n" \
10309 @@ -34,10 +36,10 @@
10310 _ASM_EXTABLE(1b, 4b) \
10311 _ASM_EXTABLE(2b, 4b) \
10312 : "=&a" (oldval), "=&r" (ret), \
10313 - "+m" (*uaddr), "=&r" (tem) \
10314 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10315 : "r" (oparg), "i" (-EFAULT), "1" (0))
10316
10317 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10318 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10319 {
10320 int op = (encoded_op >> 28) & 7;
10321 int cmp = (encoded_op >> 24) & 15;
10322 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10323
10324 switch (op) {
10325 case FUTEX_OP_SET:
10326 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10327 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10328 break;
10329 case FUTEX_OP_ADD:
10330 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10331 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10332 uaddr, oparg);
10333 break;
10334 case FUTEX_OP_OR:
10335 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10336 return ret;
10337 }
10338
10339 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10340 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10341 int newval)
10342 {
10343
10344 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10345 return -ENOSYS;
10346 #endif
10347
10348 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10349 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10350 return -EFAULT;
10351
10352 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10353 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10354 "2:\t.section .fixup, \"ax\"\n"
10355 "3:\tmov %2, %0\n"
10356 "\tjmp 2b\n"
10357 "\t.previous\n"
10358 _ASM_EXTABLE(1b, 3b)
10359 - : "=a" (oldval), "+m" (*uaddr)
10360 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10361 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10362 : "memory"
10363 );
10364 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10365 index ba180d9..3bad351 100644
10366 --- a/arch/x86/include/asm/hw_irq.h
10367 +++ b/arch/x86/include/asm/hw_irq.h
10368 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10369 extern void enable_IO_APIC(void);
10370
10371 /* Statistics */
10372 -extern atomic_t irq_err_count;
10373 -extern atomic_t irq_mis_count;
10374 +extern atomic_unchecked_t irq_err_count;
10375 +extern atomic_unchecked_t irq_mis_count;
10376
10377 /* EISA */
10378 extern void eisa_set_level_irq(unsigned int irq);
10379 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10380 index 0b20bbb..4cb1396 100644
10381 --- a/arch/x86/include/asm/i387.h
10382 +++ b/arch/x86/include/asm/i387.h
10383 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10384 {
10385 int err;
10386
10387 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10388 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10389 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10390 +#endif
10391 +
10392 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10393 "2:\n"
10394 ".section .fixup,\"ax\"\n"
10395 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10396 {
10397 int err;
10398
10399 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10400 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10401 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10402 +#endif
10403 +
10404 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10405 "2:\n"
10406 ".section .fixup,\"ax\"\n"
10407 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10408 }
10409
10410 /* We need a safe address that is cheap to find and that is already
10411 - in L1 during context switch. The best choices are unfortunately
10412 - different for UP and SMP */
10413 -#ifdef CONFIG_SMP
10414 -#define safe_address (__per_cpu_offset[0])
10415 -#else
10416 -#define safe_address (kstat_cpu(0).cpustat.user)
10417 -#endif
10418 + in L1 during context switch. */
10419 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10420
10421 /*
10422 * These must be called with preempt disabled
10423 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10424 struct thread_info *me = current_thread_info();
10425 preempt_disable();
10426 if (me->status & TS_USEDFPU)
10427 - __save_init_fpu(me->task);
10428 + __save_init_fpu(current);
10429 else
10430 clts();
10431 }
10432 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10433 index a299900..15c5410 100644
10434 --- a/arch/x86/include/asm/io_32.h
10435 +++ b/arch/x86/include/asm/io_32.h
10436 @@ -3,6 +3,7 @@
10437
10438 #include <linux/string.h>
10439 #include <linux/compiler.h>
10440 +#include <asm/processor.h>
10441
10442 /*
10443 * This file contains the definitions for the x86 IO instructions
10444 @@ -42,6 +43,17 @@
10445
10446 #ifdef __KERNEL__
10447
10448 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10449 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10450 +{
10451 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10452 +}
10453 +
10454 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10455 +{
10456 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10457 +}
10458 +
10459 #include <asm-generic/iomap.h>
10460
10461 #include <linux/vmalloc.h>
10462 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10463 index 2440678..c158b88 100644
10464 --- a/arch/x86/include/asm/io_64.h
10465 +++ b/arch/x86/include/asm/io_64.h
10466 @@ -140,6 +140,17 @@ __OUTS(l)
10467
10468 #include <linux/vmalloc.h>
10469
10470 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10471 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10472 +{
10473 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10474 +}
10475 +
10476 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10477 +{
10478 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10479 +}
10480 +
10481 #include <asm-generic/iomap.h>
10482
10483 void __memcpy_fromio(void *, unsigned long, unsigned);
10484 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10485 index fd6d21b..8b13915 100644
10486 --- a/arch/x86/include/asm/iommu.h
10487 +++ b/arch/x86/include/asm/iommu.h
10488 @@ -3,7 +3,7 @@
10489
10490 extern void pci_iommu_shutdown(void);
10491 extern void no_iommu_init(void);
10492 -extern struct dma_map_ops nommu_dma_ops;
10493 +extern const struct dma_map_ops nommu_dma_ops;
10494 extern int force_iommu, no_iommu;
10495 extern int iommu_detected;
10496 extern int iommu_pass_through;
10497 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10498 index 9e2b952..557206e 100644
10499 --- a/arch/x86/include/asm/irqflags.h
10500 +++ b/arch/x86/include/asm/irqflags.h
10501 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10502 sti; \
10503 sysexit
10504
10505 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10506 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10507 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10508 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10509 +
10510 #else
10511 #define INTERRUPT_RETURN iret
10512 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10513 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10514 index 4fe681d..bb6d40c 100644
10515 --- a/arch/x86/include/asm/kprobes.h
10516 +++ b/arch/x86/include/asm/kprobes.h
10517 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10518 #define BREAKPOINT_INSTRUCTION 0xcc
10519 #define RELATIVEJUMP_INSTRUCTION 0xe9
10520 #define MAX_INSN_SIZE 16
10521 -#define MAX_STACK_SIZE 64
10522 -#define MIN_STACK_SIZE(ADDR) \
10523 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10524 - THREAD_SIZE - (unsigned long)(ADDR))) \
10525 - ? (MAX_STACK_SIZE) \
10526 - : (((unsigned long)current_thread_info()) + \
10527 - THREAD_SIZE - (unsigned long)(ADDR)))
10528 +#define MAX_STACK_SIZE 64UL
10529 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10530
10531 #define flush_insn_slot(p) do { } while (0)
10532
10533 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10534 index 08bc2ff..2e88d1f 100644
10535 --- a/arch/x86/include/asm/kvm_host.h
10536 +++ b/arch/x86/include/asm/kvm_host.h
10537 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10538 bool (*gb_page_enable)(void);
10539
10540 const struct trace_print_flags *exit_reasons_str;
10541 -};
10542 +} __do_const;
10543
10544 -extern struct kvm_x86_ops *kvm_x86_ops;
10545 +extern const struct kvm_x86_ops *kvm_x86_ops;
10546
10547 int kvm_mmu_module_init(void);
10548 void kvm_mmu_module_exit(void);
10549 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10550 index 47b9b6f..815aaa1 100644
10551 --- a/arch/x86/include/asm/local.h
10552 +++ b/arch/x86/include/asm/local.h
10553 @@ -18,26 +18,58 @@ typedef struct {
10554
10555 static inline void local_inc(local_t *l)
10556 {
10557 - asm volatile(_ASM_INC "%0"
10558 + asm volatile(_ASM_INC "%0\n"
10559 +
10560 +#ifdef CONFIG_PAX_REFCOUNT
10561 + "jno 0f\n"
10562 + _ASM_DEC "%0\n"
10563 + "int $4\n0:\n"
10564 + _ASM_EXTABLE(0b, 0b)
10565 +#endif
10566 +
10567 : "+m" (l->a.counter));
10568 }
10569
10570 static inline void local_dec(local_t *l)
10571 {
10572 - asm volatile(_ASM_DEC "%0"
10573 + asm volatile(_ASM_DEC "%0\n"
10574 +
10575 +#ifdef CONFIG_PAX_REFCOUNT
10576 + "jno 0f\n"
10577 + _ASM_INC "%0\n"
10578 + "int $4\n0:\n"
10579 + _ASM_EXTABLE(0b, 0b)
10580 +#endif
10581 +
10582 : "+m" (l->a.counter));
10583 }
10584
10585 static inline void local_add(long i, local_t *l)
10586 {
10587 - asm volatile(_ASM_ADD "%1,%0"
10588 + asm volatile(_ASM_ADD "%1,%0\n"
10589 +
10590 +#ifdef CONFIG_PAX_REFCOUNT
10591 + "jno 0f\n"
10592 + _ASM_SUB "%1,%0\n"
10593 + "int $4\n0:\n"
10594 + _ASM_EXTABLE(0b, 0b)
10595 +#endif
10596 +
10597 : "+m" (l->a.counter)
10598 : "ir" (i));
10599 }
10600
10601 static inline void local_sub(long i, local_t *l)
10602 {
10603 - asm volatile(_ASM_SUB "%1,%0"
10604 + asm volatile(_ASM_SUB "%1,%0\n"
10605 +
10606 +#ifdef CONFIG_PAX_REFCOUNT
10607 + "jno 0f\n"
10608 + _ASM_ADD "%1,%0\n"
10609 + "int $4\n0:\n"
10610 + _ASM_EXTABLE(0b, 0b)
10611 +#endif
10612 +
10613 : "+m" (l->a.counter)
10614 : "ir" (i));
10615 }
10616 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10617 {
10618 unsigned char c;
10619
10620 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10621 + asm volatile(_ASM_SUB "%2,%0\n"
10622 +
10623 +#ifdef CONFIG_PAX_REFCOUNT
10624 + "jno 0f\n"
10625 + _ASM_ADD "%2,%0\n"
10626 + "int $4\n0:\n"
10627 + _ASM_EXTABLE(0b, 0b)
10628 +#endif
10629 +
10630 + "sete %1\n"
10631 : "+m" (l->a.counter), "=qm" (c)
10632 : "ir" (i) : "memory");
10633 return c;
10634 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10635 {
10636 unsigned char c;
10637
10638 - asm volatile(_ASM_DEC "%0; sete %1"
10639 + asm volatile(_ASM_DEC "%0\n"
10640 +
10641 +#ifdef CONFIG_PAX_REFCOUNT
10642 + "jno 0f\n"
10643 + _ASM_INC "%0\n"
10644 + "int $4\n0:\n"
10645 + _ASM_EXTABLE(0b, 0b)
10646 +#endif
10647 +
10648 + "sete %1\n"
10649 : "+m" (l->a.counter), "=qm" (c)
10650 : : "memory");
10651 return c != 0;
10652 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10653 {
10654 unsigned char c;
10655
10656 - asm volatile(_ASM_INC "%0; sete %1"
10657 + asm volatile(_ASM_INC "%0\n"
10658 +
10659 +#ifdef CONFIG_PAX_REFCOUNT
10660 + "jno 0f\n"
10661 + _ASM_DEC "%0\n"
10662 + "int $4\n0:\n"
10663 + _ASM_EXTABLE(0b, 0b)
10664 +#endif
10665 +
10666 + "sete %1\n"
10667 : "+m" (l->a.counter), "=qm" (c)
10668 : : "memory");
10669 return c != 0;
10670 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10671 {
10672 unsigned char c;
10673
10674 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10675 + asm volatile(_ASM_ADD "%2,%0\n"
10676 +
10677 +#ifdef CONFIG_PAX_REFCOUNT
10678 + "jno 0f\n"
10679 + _ASM_SUB "%2,%0\n"
10680 + "int $4\n0:\n"
10681 + _ASM_EXTABLE(0b, 0b)
10682 +#endif
10683 +
10684 + "sets %1\n"
10685 : "+m" (l->a.counter), "=qm" (c)
10686 : "ir" (i) : "memory");
10687 return c;
10688 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10689 #endif
10690 /* Modern 486+ processor */
10691 __i = i;
10692 - asm volatile(_ASM_XADD "%0, %1;"
10693 + asm volatile(_ASM_XADD "%0, %1\n"
10694 +
10695 +#ifdef CONFIG_PAX_REFCOUNT
10696 + "jno 0f\n"
10697 + _ASM_MOV "%0,%1\n"
10698 + "int $4\n0:\n"
10699 + _ASM_EXTABLE(0b, 0b)
10700 +#endif
10701 +
10702 : "+r" (i), "+m" (l->a.counter)
10703 : : "memory");
10704 return i + __i;
10705 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10706 index ef51b50..514ba37 100644
10707 --- a/arch/x86/include/asm/microcode.h
10708 +++ b/arch/x86/include/asm/microcode.h
10709 @@ -12,13 +12,13 @@ struct device;
10710 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10711
10712 struct microcode_ops {
10713 - enum ucode_state (*request_microcode_user) (int cpu,
10714 + enum ucode_state (* const request_microcode_user) (int cpu,
10715 const void __user *buf, size_t size);
10716
10717 - enum ucode_state (*request_microcode_fw) (int cpu,
10718 + enum ucode_state (* const request_microcode_fw) (int cpu,
10719 struct device *device);
10720
10721 - void (*microcode_fini_cpu) (int cpu);
10722 + void (* const microcode_fini_cpu) (int cpu);
10723
10724 /*
10725 * The generic 'microcode_core' part guarantees that
10726 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10727 extern struct ucode_cpu_info ucode_cpu_info[];
10728
10729 #ifdef CONFIG_MICROCODE_INTEL
10730 -extern struct microcode_ops * __init init_intel_microcode(void);
10731 +extern const struct microcode_ops * __init init_intel_microcode(void);
10732 #else
10733 -static inline struct microcode_ops * __init init_intel_microcode(void)
10734 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10735 {
10736 return NULL;
10737 }
10738 #endif /* CONFIG_MICROCODE_INTEL */
10739
10740 #ifdef CONFIG_MICROCODE_AMD
10741 -extern struct microcode_ops * __init init_amd_microcode(void);
10742 +extern const struct microcode_ops * __init init_amd_microcode(void);
10743 #else
10744 -static inline struct microcode_ops * __init init_amd_microcode(void)
10745 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10746 {
10747 return NULL;
10748 }
10749 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10750 index 593e51d..fa69c9a 100644
10751 --- a/arch/x86/include/asm/mman.h
10752 +++ b/arch/x86/include/asm/mman.h
10753 @@ -5,4 +5,14 @@
10754
10755 #include <asm-generic/mman.h>
10756
10757 +#ifdef __KERNEL__
10758 +#ifndef __ASSEMBLY__
10759 +#ifdef CONFIG_X86_32
10760 +#define arch_mmap_check i386_mmap_check
10761 +int i386_mmap_check(unsigned long addr, unsigned long len,
10762 + unsigned long flags);
10763 +#endif
10764 +#endif
10765 +#endif
10766 +
10767 #endif /* _ASM_X86_MMAN_H */
10768 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10769 index 80a1dee..239c67d 100644
10770 --- a/arch/x86/include/asm/mmu.h
10771 +++ b/arch/x86/include/asm/mmu.h
10772 @@ -9,10 +9,23 @@
10773 * we put the segment information here.
10774 */
10775 typedef struct {
10776 - void *ldt;
10777 + struct desc_struct *ldt;
10778 int size;
10779 struct mutex lock;
10780 - void *vdso;
10781 + unsigned long vdso;
10782 +
10783 +#ifdef CONFIG_X86_32
10784 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10785 + unsigned long user_cs_base;
10786 + unsigned long user_cs_limit;
10787 +
10788 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10789 + cpumask_t cpu_user_cs_mask;
10790 +#endif
10791 +
10792 +#endif
10793 +#endif
10794 +
10795 } mm_context_t;
10796
10797 #ifdef CONFIG_SMP
10798 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10799 index 8b5393e..8143173 100644
10800 --- a/arch/x86/include/asm/mmu_context.h
10801 +++ b/arch/x86/include/asm/mmu_context.h
10802 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10803
10804 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10805 {
10806 +
10807 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10808 + unsigned int i;
10809 + pgd_t *pgd;
10810 +
10811 + pax_open_kernel();
10812 + pgd = get_cpu_pgd(smp_processor_id());
10813 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10814 + set_pgd_batched(pgd+i, native_make_pgd(0));
10815 + pax_close_kernel();
10816 +#endif
10817 +
10818 #ifdef CONFIG_SMP
10819 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10820 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10821 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10822 struct task_struct *tsk)
10823 {
10824 unsigned cpu = smp_processor_id();
10825 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10826 + int tlbstate = TLBSTATE_OK;
10827 +#endif
10828
10829 if (likely(prev != next)) {
10830 #ifdef CONFIG_SMP
10831 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10832 + tlbstate = percpu_read(cpu_tlbstate.state);
10833 +#endif
10834 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10835 percpu_write(cpu_tlbstate.active_mm, next);
10836 #endif
10837 cpumask_set_cpu(cpu, mm_cpumask(next));
10838
10839 /* Re-load page tables */
10840 +#ifdef CONFIG_PAX_PER_CPU_PGD
10841 + pax_open_kernel();
10842 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10843 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10844 + pax_close_kernel();
10845 + load_cr3(get_cpu_pgd(cpu));
10846 +#else
10847 load_cr3(next->pgd);
10848 +#endif
10849
10850 /* stop flush ipis for the previous mm */
10851 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10852 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10853 */
10854 if (unlikely(prev->context.ldt != next->context.ldt))
10855 load_LDT_nolock(&next->context);
10856 - }
10857 +
10858 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10859 + if (!nx_enabled) {
10860 + smp_mb__before_clear_bit();
10861 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10862 + smp_mb__after_clear_bit();
10863 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10864 + }
10865 +#endif
10866 +
10867 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10868 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10869 + prev->context.user_cs_limit != next->context.user_cs_limit))
10870 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10871 #ifdef CONFIG_SMP
10872 + else if (unlikely(tlbstate != TLBSTATE_OK))
10873 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10874 +#endif
10875 +#endif
10876 +
10877 + }
10878 else {
10879 +
10880 +#ifdef CONFIG_PAX_PER_CPU_PGD
10881 + pax_open_kernel();
10882 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10883 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10884 + pax_close_kernel();
10885 + load_cr3(get_cpu_pgd(cpu));
10886 +#endif
10887 +
10888 +#ifdef CONFIG_SMP
10889 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10890 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10891
10892 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10893 * tlb flush IPI delivery. We must reload CR3
10894 * to make sure to use no freed page tables.
10895 */
10896 +
10897 +#ifndef CONFIG_PAX_PER_CPU_PGD
10898 load_cr3(next->pgd);
10899 +#endif
10900 +
10901 load_LDT_nolock(&next->context);
10902 +
10903 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10904 + if (!nx_enabled)
10905 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10906 +#endif
10907 +
10908 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10909 +#ifdef CONFIG_PAX_PAGEEXEC
10910 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10911 +#endif
10912 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10913 +#endif
10914 +
10915 }
10916 +#endif
10917 }
10918 -#endif
10919 }
10920
10921 #define activate_mm(prev, next) \
10922 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10923 index 3e2ce58..caaf478 100644
10924 --- a/arch/x86/include/asm/module.h
10925 +++ b/arch/x86/include/asm/module.h
10926 @@ -5,6 +5,7 @@
10927
10928 #ifdef CONFIG_X86_64
10929 /* X86_64 does not define MODULE_PROC_FAMILY */
10930 +#define MODULE_PROC_FAMILY ""
10931 #elif defined CONFIG_M386
10932 #define MODULE_PROC_FAMILY "386 "
10933 #elif defined CONFIG_M486
10934 @@ -59,13 +60,26 @@
10935 #error unknown processor family
10936 #endif
10937
10938 -#ifdef CONFIG_X86_32
10939 -# ifdef CONFIG_4KSTACKS
10940 -# define MODULE_STACKSIZE "4KSTACKS "
10941 -# else
10942 -# define MODULE_STACKSIZE ""
10943 -# endif
10944 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10945 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10946 +#define MODULE_STACKSIZE "4KSTACKS "
10947 +#else
10948 +#define MODULE_STACKSIZE ""
10949 #endif
10950
10951 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10952 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10953 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10954 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10955 +#else
10956 +#define MODULE_PAX_KERNEXEC ""
10957 +#endif
10958 +
10959 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10960 +#define MODULE_PAX_UDEREF "UDEREF "
10961 +#else
10962 +#define MODULE_PAX_UDEREF ""
10963 +#endif
10964 +
10965 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10966 +
10967 #endif /* _ASM_X86_MODULE_H */
10968 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10969 index 7639dbf..e08a58c 100644
10970 --- a/arch/x86/include/asm/page_64_types.h
10971 +++ b/arch/x86/include/asm/page_64_types.h
10972 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10973
10974 /* duplicated to the one in bootmem.h */
10975 extern unsigned long max_pfn;
10976 -extern unsigned long phys_base;
10977 +extern const unsigned long phys_base;
10978
10979 extern unsigned long __phys_addr(unsigned long);
10980 #define __phys_reloc_hide(x) (x)
10981 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10982 index efb3899..ef30687 100644
10983 --- a/arch/x86/include/asm/paravirt.h
10984 +++ b/arch/x86/include/asm/paravirt.h
10985 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10986 val);
10987 }
10988
10989 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10990 +{
10991 + pgdval_t val = native_pgd_val(pgd);
10992 +
10993 + if (sizeof(pgdval_t) > sizeof(long))
10994 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10995 + val, (u64)val >> 32);
10996 + else
10997 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10998 + val);
10999 +}
11000 +
11001 static inline void pgd_clear(pgd_t *pgdp)
11002 {
11003 set_pgd(pgdp, __pgd(0));
11004 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11005 pv_mmu_ops.set_fixmap(idx, phys, flags);
11006 }
11007
11008 +#ifdef CONFIG_PAX_KERNEXEC
11009 +static inline unsigned long pax_open_kernel(void)
11010 +{
11011 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11012 +}
11013 +
11014 +static inline unsigned long pax_close_kernel(void)
11015 +{
11016 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11017 +}
11018 +#else
11019 +static inline unsigned long pax_open_kernel(void) { return 0; }
11020 +static inline unsigned long pax_close_kernel(void) { return 0; }
11021 +#endif
11022 +
11023 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11024
11025 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11026 @@ -945,7 +972,7 @@ extern void default_banner(void);
11027
11028 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11029 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11030 -#define PARA_INDIRECT(addr) *%cs:addr
11031 +#define PARA_INDIRECT(addr) *%ss:addr
11032 #endif
11033
11034 #define INTERRUPT_RETURN \
11035 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
11036 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11037 CLBR_NONE, \
11038 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11039 +
11040 +#define GET_CR0_INTO_RDI \
11041 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11042 + mov %rax,%rdi
11043 +
11044 +#define SET_RDI_INTO_CR0 \
11045 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11046 +
11047 +#define GET_CR3_INTO_RDI \
11048 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11049 + mov %rax,%rdi
11050 +
11051 +#define SET_RDI_INTO_CR3 \
11052 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11053 +
11054 #endif /* CONFIG_X86_32 */
11055
11056 #endif /* __ASSEMBLY__ */
11057 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11058 index 9357473..aeb2de5 100644
11059 --- a/arch/x86/include/asm/paravirt_types.h
11060 +++ b/arch/x86/include/asm/paravirt_types.h
11061 @@ -78,19 +78,19 @@ struct pv_init_ops {
11062 */
11063 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11064 unsigned long addr, unsigned len);
11065 -};
11066 +} __no_const;
11067
11068
11069 struct pv_lazy_ops {
11070 /* Set deferred update mode, used for batching operations. */
11071 void (*enter)(void);
11072 void (*leave)(void);
11073 -};
11074 +} __no_const;
11075
11076 struct pv_time_ops {
11077 unsigned long long (*sched_clock)(void);
11078 unsigned long (*get_tsc_khz)(void);
11079 -};
11080 +} __no_const;
11081
11082 struct pv_cpu_ops {
11083 /* hooks for various privileged instructions */
11084 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
11085
11086 void (*start_context_switch)(struct task_struct *prev);
11087 void (*end_context_switch)(struct task_struct *next);
11088 -};
11089 +} __no_const;
11090
11091 struct pv_irq_ops {
11092 /*
11093 @@ -217,7 +217,7 @@ struct pv_apic_ops {
11094 unsigned long start_eip,
11095 unsigned long start_esp);
11096 #endif
11097 -};
11098 +} __no_const;
11099
11100 struct pv_mmu_ops {
11101 unsigned long (*read_cr2)(void);
11102 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
11103 struct paravirt_callee_save make_pud;
11104
11105 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11106 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11107 #endif /* PAGETABLE_LEVELS == 4 */
11108 #endif /* PAGETABLE_LEVELS >= 3 */
11109
11110 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
11111 an mfn. We can tell which is which from the index. */
11112 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11113 phys_addr_t phys, pgprot_t flags);
11114 +
11115 +#ifdef CONFIG_PAX_KERNEXEC
11116 + unsigned long (*pax_open_kernel)(void);
11117 + unsigned long (*pax_close_kernel)(void);
11118 +#endif
11119 +
11120 };
11121
11122 struct raw_spinlock;
11123 @@ -326,7 +333,7 @@ struct pv_lock_ops {
11124 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11125 int (*spin_trylock)(struct raw_spinlock *lock);
11126 void (*spin_unlock)(struct raw_spinlock *lock);
11127 -};
11128 +} __no_const;
11129
11130 /* This contains all the paravirt structures: we get a convenient
11131 * number for each function using the offset which we use to indicate
11132 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11133 index b399988..3f47c38 100644
11134 --- a/arch/x86/include/asm/pci_x86.h
11135 +++ b/arch/x86/include/asm/pci_x86.h
11136 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11137 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11138
11139 struct pci_raw_ops {
11140 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11141 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11142 int reg, int len, u32 *val);
11143 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11144 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11145 int reg, int len, u32 val);
11146 };
11147
11148 -extern struct pci_raw_ops *raw_pci_ops;
11149 -extern struct pci_raw_ops *raw_pci_ext_ops;
11150 +extern const struct pci_raw_ops *raw_pci_ops;
11151 +extern const struct pci_raw_ops *raw_pci_ext_ops;
11152
11153 -extern struct pci_raw_ops pci_direct_conf1;
11154 +extern const struct pci_raw_ops pci_direct_conf1;
11155 extern bool port_cf9_safe;
11156
11157 /* arch_initcall level */
11158 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11159 index b65a36d..50345a4 100644
11160 --- a/arch/x86/include/asm/percpu.h
11161 +++ b/arch/x86/include/asm/percpu.h
11162 @@ -78,6 +78,7 @@ do { \
11163 if (0) { \
11164 T__ tmp__; \
11165 tmp__ = (val); \
11166 + (void)tmp__; \
11167 } \
11168 switch (sizeof(var)) { \
11169 case 1: \
11170 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11171 index 271de94..ef944d6 100644
11172 --- a/arch/x86/include/asm/pgalloc.h
11173 +++ b/arch/x86/include/asm/pgalloc.h
11174 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11175 pmd_t *pmd, pte_t *pte)
11176 {
11177 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11178 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11179 +}
11180 +
11181 +static inline void pmd_populate_user(struct mm_struct *mm,
11182 + pmd_t *pmd, pte_t *pte)
11183 +{
11184 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11185 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11186 }
11187
11188 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11189 index 2334982..70bc412 100644
11190 --- a/arch/x86/include/asm/pgtable-2level.h
11191 +++ b/arch/x86/include/asm/pgtable-2level.h
11192 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11193
11194 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11195 {
11196 + pax_open_kernel();
11197 *pmdp = pmd;
11198 + pax_close_kernel();
11199 }
11200
11201 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11202 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11203 index 33927d2..ccde329 100644
11204 --- a/arch/x86/include/asm/pgtable-3level.h
11205 +++ b/arch/x86/include/asm/pgtable-3level.h
11206 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11207
11208 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11209 {
11210 + pax_open_kernel();
11211 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11212 + pax_close_kernel();
11213 }
11214
11215 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11216 {
11217 + pax_open_kernel();
11218 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11219 + pax_close_kernel();
11220 }
11221
11222 /*
11223 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11224 index af6fd36..867ff74 100644
11225 --- a/arch/x86/include/asm/pgtable.h
11226 +++ b/arch/x86/include/asm/pgtable.h
11227 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11228
11229 #ifndef __PAGETABLE_PUD_FOLDED
11230 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11231 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11232 #define pgd_clear(pgd) native_pgd_clear(pgd)
11233 #endif
11234
11235 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11236
11237 #define arch_end_context_switch(prev) do {} while(0)
11238
11239 +#define pax_open_kernel() native_pax_open_kernel()
11240 +#define pax_close_kernel() native_pax_close_kernel()
11241 #endif /* CONFIG_PARAVIRT */
11242
11243 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11244 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11245 +
11246 +#ifdef CONFIG_PAX_KERNEXEC
11247 +static inline unsigned long native_pax_open_kernel(void)
11248 +{
11249 + unsigned long cr0;
11250 +
11251 + preempt_disable();
11252 + barrier();
11253 + cr0 = read_cr0() ^ X86_CR0_WP;
11254 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11255 + write_cr0(cr0);
11256 + return cr0 ^ X86_CR0_WP;
11257 +}
11258 +
11259 +static inline unsigned long native_pax_close_kernel(void)
11260 +{
11261 + unsigned long cr0;
11262 +
11263 + cr0 = read_cr0() ^ X86_CR0_WP;
11264 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11265 + write_cr0(cr0);
11266 + barrier();
11267 + preempt_enable_no_resched();
11268 + return cr0 ^ X86_CR0_WP;
11269 +}
11270 +#else
11271 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11272 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11273 +#endif
11274 +
11275 /*
11276 * The following only work if pte_present() is true.
11277 * Undefined behaviour if not..
11278 */
11279 +static inline int pte_user(pte_t pte)
11280 +{
11281 + return pte_val(pte) & _PAGE_USER;
11282 +}
11283 +
11284 static inline int pte_dirty(pte_t pte)
11285 {
11286 return pte_flags(pte) & _PAGE_DIRTY;
11287 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11288 return pte_clear_flags(pte, _PAGE_RW);
11289 }
11290
11291 +static inline pte_t pte_mkread(pte_t pte)
11292 +{
11293 + return __pte(pte_val(pte) | _PAGE_USER);
11294 +}
11295 +
11296 static inline pte_t pte_mkexec(pte_t pte)
11297 {
11298 - return pte_clear_flags(pte, _PAGE_NX);
11299 +#ifdef CONFIG_X86_PAE
11300 + if (__supported_pte_mask & _PAGE_NX)
11301 + return pte_clear_flags(pte, _PAGE_NX);
11302 + else
11303 +#endif
11304 + return pte_set_flags(pte, _PAGE_USER);
11305 +}
11306 +
11307 +static inline pte_t pte_exprotect(pte_t pte)
11308 +{
11309 +#ifdef CONFIG_X86_PAE
11310 + if (__supported_pte_mask & _PAGE_NX)
11311 + return pte_set_flags(pte, _PAGE_NX);
11312 + else
11313 +#endif
11314 + return pte_clear_flags(pte, _PAGE_USER);
11315 }
11316
11317 static inline pte_t pte_mkdirty(pte_t pte)
11318 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11319 #endif
11320
11321 #ifndef __ASSEMBLY__
11322 +
11323 +#ifdef CONFIG_PAX_PER_CPU_PGD
11324 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11325 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11326 +{
11327 + return cpu_pgd[cpu];
11328 +}
11329 +#endif
11330 +
11331 #include <linux/mm_types.h>
11332
11333 static inline int pte_none(pte_t pte)
11334 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11335
11336 static inline int pgd_bad(pgd_t pgd)
11337 {
11338 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11339 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11340 }
11341
11342 static inline int pgd_none(pgd_t pgd)
11343 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11344 * pgd_offset() returns a (pgd_t *)
11345 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11346 */
11347 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11348 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11349 +
11350 +#ifdef CONFIG_PAX_PER_CPU_PGD
11351 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11352 +#endif
11353 +
11354 /*
11355 * a shortcut which implies the use of the kernel's pgd, instead
11356 * of a process's
11357 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11358 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11359 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11360
11361 +#ifdef CONFIG_X86_32
11362 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11363 +#else
11364 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11365 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11366 +
11367 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11368 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11369 +#else
11370 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11371 +#endif
11372 +
11373 +#endif
11374 +
11375 #ifndef __ASSEMBLY__
11376
11377 extern int direct_gbpages;
11378 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11379 * dst and src can be on the same page, but the range must not overlap,
11380 * and must not cross a page boundary.
11381 */
11382 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11383 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11384 {
11385 - memcpy(dst, src, count * sizeof(pgd_t));
11386 + pax_open_kernel();
11387 + while (count--)
11388 + *dst++ = *src++;
11389 + pax_close_kernel();
11390 }
11391
11392 +#ifdef CONFIG_PAX_PER_CPU_PGD
11393 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11394 +#endif
11395 +
11396 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11397 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11398 +#else
11399 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11400 +#endif
11401
11402 #include <asm-generic/pgtable.h>
11403 #endif /* __ASSEMBLY__ */
11404 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11405 index 750f1bf..971e839 100644
11406 --- a/arch/x86/include/asm/pgtable_32.h
11407 +++ b/arch/x86/include/asm/pgtable_32.h
11408 @@ -26,9 +26,6 @@
11409 struct mm_struct;
11410 struct vm_area_struct;
11411
11412 -extern pgd_t swapper_pg_dir[1024];
11413 -extern pgd_t trampoline_pg_dir[1024];
11414 -
11415 static inline void pgtable_cache_init(void) { }
11416 static inline void check_pgt_cache(void) { }
11417 void paging_init(void);
11418 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11419 # include <asm/pgtable-2level.h>
11420 #endif
11421
11422 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11423 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11424 +#ifdef CONFIG_X86_PAE
11425 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11426 +#endif
11427 +
11428 #if defined(CONFIG_HIGHPTE)
11429 #define __KM_PTE \
11430 (in_nmi() ? KM_NMI_PTE : \
11431 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11432 /* Clear a kernel PTE and flush it from the TLB */
11433 #define kpte_clear_flush(ptep, vaddr) \
11434 do { \
11435 + pax_open_kernel(); \
11436 pte_clear(&init_mm, (vaddr), (ptep)); \
11437 + pax_close_kernel(); \
11438 __flush_tlb_one((vaddr)); \
11439 } while (0)
11440
11441 @@ -85,6 +90,9 @@ do { \
11442
11443 #endif /* !__ASSEMBLY__ */
11444
11445 +#define HAVE_ARCH_UNMAPPED_AREA
11446 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11447 +
11448 /*
11449 * kern_addr_valid() is (1) for FLATMEM and (0) for
11450 * SPARSEMEM and DISCONTIGMEM
11451 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11452 index 5e67c15..12d5c47 100644
11453 --- a/arch/x86/include/asm/pgtable_32_types.h
11454 +++ b/arch/x86/include/asm/pgtable_32_types.h
11455 @@ -8,7 +8,7 @@
11456 */
11457 #ifdef CONFIG_X86_PAE
11458 # include <asm/pgtable-3level_types.h>
11459 -# define PMD_SIZE (1UL << PMD_SHIFT)
11460 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11461 # define PMD_MASK (~(PMD_SIZE - 1))
11462 #else
11463 # include <asm/pgtable-2level_types.h>
11464 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11465 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11466 #endif
11467
11468 +#ifdef CONFIG_PAX_KERNEXEC
11469 +#ifndef __ASSEMBLY__
11470 +extern unsigned char MODULES_EXEC_VADDR[];
11471 +extern unsigned char MODULES_EXEC_END[];
11472 +#endif
11473 +#include <asm/boot.h>
11474 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11475 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11476 +#else
11477 +#define ktla_ktva(addr) (addr)
11478 +#define ktva_ktla(addr) (addr)
11479 +#endif
11480 +
11481 #define MODULES_VADDR VMALLOC_START
11482 #define MODULES_END VMALLOC_END
11483 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11484 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11485 index c57a301..6b414ff 100644
11486 --- a/arch/x86/include/asm/pgtable_64.h
11487 +++ b/arch/x86/include/asm/pgtable_64.h
11488 @@ -16,10 +16,14 @@
11489
11490 extern pud_t level3_kernel_pgt[512];
11491 extern pud_t level3_ident_pgt[512];
11492 +extern pud_t level3_vmalloc_start_pgt[512];
11493 +extern pud_t level3_vmalloc_end_pgt[512];
11494 +extern pud_t level3_vmemmap_pgt[512];
11495 +extern pud_t level2_vmemmap_pgt[512];
11496 extern pmd_t level2_kernel_pgt[512];
11497 extern pmd_t level2_fixmap_pgt[512];
11498 -extern pmd_t level2_ident_pgt[512];
11499 -extern pgd_t init_level4_pgt[];
11500 +extern pmd_t level2_ident_pgt[512*2];
11501 +extern pgd_t init_level4_pgt[512];
11502
11503 #define swapper_pg_dir init_level4_pgt
11504
11505 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11506
11507 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11508 {
11509 + pax_open_kernel();
11510 *pmdp = pmd;
11511 + pax_close_kernel();
11512 }
11513
11514 static inline void native_pmd_clear(pmd_t *pmd)
11515 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11516
11517 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11518 {
11519 + pax_open_kernel();
11520 + *pgdp = pgd;
11521 + pax_close_kernel();
11522 +}
11523 +
11524 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11525 +{
11526 *pgdp = pgd;
11527 }
11528
11529 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11530 index 766ea16..5b96cb3 100644
11531 --- a/arch/x86/include/asm/pgtable_64_types.h
11532 +++ b/arch/x86/include/asm/pgtable_64_types.h
11533 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11534 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11535 #define MODULES_END _AC(0xffffffffff000000, UL)
11536 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11537 +#define MODULES_EXEC_VADDR MODULES_VADDR
11538 +#define MODULES_EXEC_END MODULES_END
11539 +
11540 +#define ktla_ktva(addr) (addr)
11541 +#define ktva_ktla(addr) (addr)
11542
11543 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11544 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11545 index d1f4a76..2f46ba1 100644
11546 --- a/arch/x86/include/asm/pgtable_types.h
11547 +++ b/arch/x86/include/asm/pgtable_types.h
11548 @@ -16,12 +16,11 @@
11549 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11550 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11551 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11552 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11553 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11554 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11555 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11556 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11557 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11558 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11559 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11560 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11561
11562 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11563 @@ -39,7 +38,6 @@
11564 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11565 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11566 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11567 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11568 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11569 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11570 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11571 @@ -55,8 +53,10 @@
11572
11573 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11574 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11575 -#else
11576 +#elif defined(CONFIG_KMEMCHECK)
11577 #define _PAGE_NX (_AT(pteval_t, 0))
11578 +#else
11579 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11580 #endif
11581
11582 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11583 @@ -93,6 +93,9 @@
11584 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11585 _PAGE_ACCESSED)
11586
11587 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11588 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11589 +
11590 #define __PAGE_KERNEL_EXEC \
11591 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11592 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11593 @@ -103,8 +106,8 @@
11594 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11595 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11596 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11597 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11598 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11599 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11600 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11601 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11602 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11603 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11604 @@ -163,8 +166,8 @@
11605 * bits are combined, this will alow user to access the high address mapped
11606 * VDSO in the presence of CONFIG_COMPAT_VDSO
11607 */
11608 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11609 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11610 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11611 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11612 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11613 #endif
11614
11615 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11616 {
11617 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11618 }
11619 +#endif
11620
11621 +#if PAGETABLE_LEVELS == 3
11622 +#include <asm-generic/pgtable-nopud.h>
11623 +#endif
11624 +
11625 +#if PAGETABLE_LEVELS == 2
11626 +#include <asm-generic/pgtable-nopmd.h>
11627 +#endif
11628 +
11629 +#ifndef __ASSEMBLY__
11630 #if PAGETABLE_LEVELS > 3
11631 typedef struct { pudval_t pud; } pud_t;
11632
11633 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11634 return pud.pud;
11635 }
11636 #else
11637 -#include <asm-generic/pgtable-nopud.h>
11638 -
11639 static inline pudval_t native_pud_val(pud_t pud)
11640 {
11641 return native_pgd_val(pud.pgd);
11642 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11643 return pmd.pmd;
11644 }
11645 #else
11646 -#include <asm-generic/pgtable-nopmd.h>
11647 -
11648 static inline pmdval_t native_pmd_val(pmd_t pmd)
11649 {
11650 return native_pgd_val(pmd.pud.pgd);
11651 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11652
11653 extern pteval_t __supported_pte_mask;
11654 extern void set_nx(void);
11655 +
11656 +#ifdef CONFIG_X86_32
11657 +#ifdef CONFIG_X86_PAE
11658 extern int nx_enabled;
11659 +#else
11660 +#define nx_enabled (0)
11661 +#endif
11662 +#else
11663 +#define nx_enabled (1)
11664 +#endif
11665
11666 #define pgprot_writecombine pgprot_writecombine
11667 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11668 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11669 index fa04dea..5f823fc 100644
11670 --- a/arch/x86/include/asm/processor.h
11671 +++ b/arch/x86/include/asm/processor.h
11672 @@ -272,7 +272,7 @@ struct tss_struct {
11673
11674 } ____cacheline_aligned;
11675
11676 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11677 +extern struct tss_struct init_tss[NR_CPUS];
11678
11679 /*
11680 * Save the original ist values for checking stack pointers during debugging
11681 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11682 */
11683 #define TASK_SIZE PAGE_OFFSET
11684 #define TASK_SIZE_MAX TASK_SIZE
11685 +
11686 +#ifdef CONFIG_PAX_SEGMEXEC
11687 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11688 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11689 +#else
11690 #define STACK_TOP TASK_SIZE
11691 -#define STACK_TOP_MAX STACK_TOP
11692 +#endif
11693 +
11694 +#define STACK_TOP_MAX TASK_SIZE
11695
11696 #define INIT_THREAD { \
11697 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11698 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11699 .vm86_info = NULL, \
11700 .sysenter_cs = __KERNEL_CS, \
11701 .io_bitmap_ptr = NULL, \
11702 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11703 */
11704 #define INIT_TSS { \
11705 .x86_tss = { \
11706 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11707 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11708 .ss0 = __KERNEL_DS, \
11709 .ss1 = __KERNEL_CS, \
11710 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11711 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11712 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11713
11714 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11715 -#define KSTK_TOP(info) \
11716 -({ \
11717 - unsigned long *__ptr = (unsigned long *)(info); \
11718 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11719 -})
11720 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11721
11722 /*
11723 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11724 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11725 #define task_pt_regs(task) \
11726 ({ \
11727 struct pt_regs *__regs__; \
11728 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11729 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11730 __regs__ - 1; \
11731 })
11732
11733 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11734 /*
11735 * User space process size. 47bits minus one guard page.
11736 */
11737 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11738 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11739
11740 /* This decides where the kernel will search for a free chunk of vm
11741 * space during mmap's.
11742 */
11743 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11744 - 0xc0000000 : 0xFFFFe000)
11745 + 0xc0000000 : 0xFFFFf000)
11746
11747 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11748 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11749 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11750 #define STACK_TOP_MAX TASK_SIZE_MAX
11751
11752 #define INIT_THREAD { \
11753 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11754 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11755 }
11756
11757 #define INIT_TSS { \
11758 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11759 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11760 }
11761
11762 /*
11763 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11764 */
11765 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11766
11767 +#ifdef CONFIG_PAX_SEGMEXEC
11768 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11769 +#endif
11770 +
11771 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11772
11773 /* Get/set a process' ability to use the timestamp counter instruction */
11774 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11775 index 0f0d908..f2e3da2 100644
11776 --- a/arch/x86/include/asm/ptrace.h
11777 +++ b/arch/x86/include/asm/ptrace.h
11778 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11779 }
11780
11781 /*
11782 - * user_mode_vm(regs) determines whether a register set came from user mode.
11783 + * user_mode(regs) determines whether a register set came from user mode.
11784 * This is true if V8086 mode was enabled OR if the register set was from
11785 * protected mode with RPL-3 CS value. This tricky test checks that with
11786 * one comparison. Many places in the kernel can bypass this full check
11787 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11788 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11789 + * be used.
11790 */
11791 -static inline int user_mode(struct pt_regs *regs)
11792 +static inline int user_mode_novm(struct pt_regs *regs)
11793 {
11794 #ifdef CONFIG_X86_32
11795 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11796 #else
11797 - return !!(regs->cs & 3);
11798 + return !!(regs->cs & SEGMENT_RPL_MASK);
11799 #endif
11800 }
11801
11802 -static inline int user_mode_vm(struct pt_regs *regs)
11803 +static inline int user_mode(struct pt_regs *regs)
11804 {
11805 #ifdef CONFIG_X86_32
11806 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11807 USER_RPL;
11808 #else
11809 - return user_mode(regs);
11810 + return user_mode_novm(regs);
11811 #endif
11812 }
11813
11814 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11815 index 562d4fd..6e39df1 100644
11816 --- a/arch/x86/include/asm/reboot.h
11817 +++ b/arch/x86/include/asm/reboot.h
11818 @@ -6,19 +6,19 @@
11819 struct pt_regs;
11820
11821 struct machine_ops {
11822 - void (*restart)(char *cmd);
11823 - void (*halt)(void);
11824 - void (*power_off)(void);
11825 + void (* __noreturn restart)(char *cmd);
11826 + void (* __noreturn halt)(void);
11827 + void (* __noreturn power_off)(void);
11828 void (*shutdown)(void);
11829 void (*crash_shutdown)(struct pt_regs *);
11830 - void (*emergency_restart)(void);
11831 -};
11832 + void (* __noreturn emergency_restart)(void);
11833 +} __no_const;
11834
11835 extern struct machine_ops machine_ops;
11836
11837 void native_machine_crash_shutdown(struct pt_regs *regs);
11838 void native_machine_shutdown(void);
11839 -void machine_real_restart(const unsigned char *code, int length);
11840 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11841
11842 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11843 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11844 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11845 index 606ede1..dbfff37 100644
11846 --- a/arch/x86/include/asm/rwsem.h
11847 +++ b/arch/x86/include/asm/rwsem.h
11848 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11849 {
11850 asm volatile("# beginning down_read\n\t"
11851 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11852 +
11853 +#ifdef CONFIG_PAX_REFCOUNT
11854 + "jno 0f\n"
11855 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11856 + "int $4\n0:\n"
11857 + _ASM_EXTABLE(0b, 0b)
11858 +#endif
11859 +
11860 /* adds 0x00000001, returns the old value */
11861 " jns 1f\n"
11862 " call call_rwsem_down_read_failed\n"
11863 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11864 "1:\n\t"
11865 " mov %1,%2\n\t"
11866 " add %3,%2\n\t"
11867 +
11868 +#ifdef CONFIG_PAX_REFCOUNT
11869 + "jno 0f\n"
11870 + "sub %3,%2\n"
11871 + "int $4\n0:\n"
11872 + _ASM_EXTABLE(0b, 0b)
11873 +#endif
11874 +
11875 " jle 2f\n\t"
11876 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11877 " jnz 1b\n\t"
11878 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11879 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11880 asm volatile("# beginning down_write\n\t"
11881 LOCK_PREFIX " xadd %1,(%2)\n\t"
11882 +
11883 +#ifdef CONFIG_PAX_REFCOUNT
11884 + "jno 0f\n"
11885 + "mov %1,(%2)\n"
11886 + "int $4\n0:\n"
11887 + _ASM_EXTABLE(0b, 0b)
11888 +#endif
11889 +
11890 /* subtract 0x0000ffff, returns the old value */
11891 " test %1,%1\n\t"
11892 /* was the count 0 before? */
11893 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11894 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11895 asm volatile("# beginning __up_read\n\t"
11896 LOCK_PREFIX " xadd %1,(%2)\n\t"
11897 +
11898 +#ifdef CONFIG_PAX_REFCOUNT
11899 + "jno 0f\n"
11900 + "mov %1,(%2)\n"
11901 + "int $4\n0:\n"
11902 + _ASM_EXTABLE(0b, 0b)
11903 +#endif
11904 +
11905 /* subtracts 1, returns the old value */
11906 " jns 1f\n\t"
11907 " call call_rwsem_wake\n"
11908 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11909 rwsem_count_t tmp;
11910 asm volatile("# beginning __up_write\n\t"
11911 LOCK_PREFIX " xadd %1,(%2)\n\t"
11912 +
11913 +#ifdef CONFIG_PAX_REFCOUNT
11914 + "jno 0f\n"
11915 + "mov %1,(%2)\n"
11916 + "int $4\n0:\n"
11917 + _ASM_EXTABLE(0b, 0b)
11918 +#endif
11919 +
11920 /* tries to transition
11921 0xffff0001 -> 0x00000000 */
11922 " jz 1f\n"
11923 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11924 {
11925 asm volatile("# beginning __downgrade_write\n\t"
11926 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11927 +
11928 +#ifdef CONFIG_PAX_REFCOUNT
11929 + "jno 0f\n"
11930 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11931 + "int $4\n0:\n"
11932 + _ASM_EXTABLE(0b, 0b)
11933 +#endif
11934 +
11935 /*
11936 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11937 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11938 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11939 static inline void rwsem_atomic_add(rwsem_count_t delta,
11940 struct rw_semaphore *sem)
11941 {
11942 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11943 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11944 +
11945 +#ifdef CONFIG_PAX_REFCOUNT
11946 + "jno 0f\n"
11947 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11948 + "int $4\n0:\n"
11949 + _ASM_EXTABLE(0b, 0b)
11950 +#endif
11951 +
11952 : "+m" (sem->count)
11953 : "er" (delta));
11954 }
11955 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11956 {
11957 rwsem_count_t tmp = delta;
11958
11959 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11960 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11961 +
11962 +#ifdef CONFIG_PAX_REFCOUNT
11963 + "jno 0f\n"
11964 + "mov %0,%1\n"
11965 + "int $4\n0:\n"
11966 + _ASM_EXTABLE(0b, 0b)
11967 +#endif
11968 +
11969 : "+r" (tmp), "+m" (sem->count)
11970 : : "memory");
11971
11972 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11973 index 14e0ed8..7f7dd5e 100644
11974 --- a/arch/x86/include/asm/segment.h
11975 +++ b/arch/x86/include/asm/segment.h
11976 @@ -62,10 +62,15 @@
11977 * 26 - ESPFIX small SS
11978 * 27 - per-cpu [ offset to per-cpu data area ]
11979 * 28 - stack_canary-20 [ for stack protector ]
11980 - * 29 - unused
11981 - * 30 - unused
11982 + * 29 - PCI BIOS CS
11983 + * 30 - PCI BIOS DS
11984 * 31 - TSS for double fault handler
11985 */
11986 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11987 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11988 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11989 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11990 +
11991 #define GDT_ENTRY_TLS_MIN 6
11992 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11993
11994 @@ -77,6 +82,8 @@
11995
11996 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11997
11998 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11999 +
12000 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12001
12002 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12003 @@ -88,7 +95,7 @@
12004 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12005 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12006
12007 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12008 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12009 #ifdef CONFIG_SMP
12010 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12011 #else
12012 @@ -102,6 +109,12 @@
12013 #define __KERNEL_STACK_CANARY 0
12014 #endif
12015
12016 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12017 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12018 +
12019 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12020 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12021 +
12022 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12023
12024 /*
12025 @@ -139,7 +152,7 @@
12026 */
12027
12028 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12029 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12030 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12031
12032
12033 #else
12034 @@ -163,6 +176,8 @@
12035 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12036 #define __USER32_DS __USER_DS
12037
12038 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12039 +
12040 #define GDT_ENTRY_TSS 8 /* needs two entries */
12041 #define GDT_ENTRY_LDT 10 /* needs two entries */
12042 #define GDT_ENTRY_TLS_MIN 12
12043 @@ -183,6 +198,7 @@
12044 #endif
12045
12046 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12047 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12048 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12049 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12050 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12051 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12052 index 4c2f63c..5685db2 100644
12053 --- a/arch/x86/include/asm/smp.h
12054 +++ b/arch/x86/include/asm/smp.h
12055 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
12056 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12057 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12058 DECLARE_PER_CPU(u16, cpu_llc_id);
12059 -DECLARE_PER_CPU(int, cpu_number);
12060 +DECLARE_PER_CPU(unsigned int, cpu_number);
12061
12062 static inline struct cpumask *cpu_sibling_mask(int cpu)
12063 {
12064 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12065 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12066
12067 /* Static state in head.S used to set up a CPU */
12068 -extern struct {
12069 - void *sp;
12070 - unsigned short ss;
12071 -} stack_start;
12072 +extern unsigned long stack_start; /* Initial stack pointer address */
12073
12074 struct smp_ops {
12075 void (*smp_prepare_boot_cpu)(void);
12076 @@ -60,7 +57,7 @@ struct smp_ops {
12077
12078 void (*send_call_func_ipi)(const struct cpumask *mask);
12079 void (*send_call_func_single_ipi)(int cpu);
12080 -};
12081 +} __no_const;
12082
12083 /* Globals due to paravirt */
12084 extern void set_cpu_sibling_map(int cpu);
12085 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12086 extern int safe_smp_processor_id(void);
12087
12088 #elif defined(CONFIG_X86_64_SMP)
12089 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12090 -
12091 -#define stack_smp_processor_id() \
12092 -({ \
12093 - struct thread_info *ti; \
12094 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12095 - ti->cpu; \
12096 -})
12097 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12098 +#define stack_smp_processor_id() raw_smp_processor_id()
12099 #define safe_smp_processor_id() smp_processor_id()
12100
12101 #endif
12102 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12103 index 4e77853..4359783 100644
12104 --- a/arch/x86/include/asm/spinlock.h
12105 +++ b/arch/x86/include/asm/spinlock.h
12106 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12107 static inline void __raw_read_lock(raw_rwlock_t *rw)
12108 {
12109 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12110 +
12111 +#ifdef CONFIG_PAX_REFCOUNT
12112 + "jno 0f\n"
12113 + LOCK_PREFIX " addl $1,(%0)\n"
12114 + "int $4\n0:\n"
12115 + _ASM_EXTABLE(0b, 0b)
12116 +#endif
12117 +
12118 "jns 1f\n"
12119 "call __read_lock_failed\n\t"
12120 "1:\n"
12121 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12122 static inline void __raw_write_lock(raw_rwlock_t *rw)
12123 {
12124 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12125 +
12126 +#ifdef CONFIG_PAX_REFCOUNT
12127 + "jno 0f\n"
12128 + LOCK_PREFIX " addl %1,(%0)\n"
12129 + "int $4\n0:\n"
12130 + _ASM_EXTABLE(0b, 0b)
12131 +#endif
12132 +
12133 "jz 1f\n"
12134 "call __write_lock_failed\n\t"
12135 "1:\n"
12136 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12137
12138 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12139 {
12140 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12141 + asm volatile(LOCK_PREFIX "incl %0\n"
12142 +
12143 +#ifdef CONFIG_PAX_REFCOUNT
12144 + "jno 0f\n"
12145 + LOCK_PREFIX "decl %0\n"
12146 + "int $4\n0:\n"
12147 + _ASM_EXTABLE(0b, 0b)
12148 +#endif
12149 +
12150 + :"+m" (rw->lock) : : "memory");
12151 }
12152
12153 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12154 {
12155 - asm volatile(LOCK_PREFIX "addl %1, %0"
12156 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
12157 +
12158 +#ifdef CONFIG_PAX_REFCOUNT
12159 + "jno 0f\n"
12160 + LOCK_PREFIX "subl %1, %0\n"
12161 + "int $4\n0:\n"
12162 + _ASM_EXTABLE(0b, 0b)
12163 +#endif
12164 +
12165 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12166 }
12167
12168 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12169 index 1575177..cb23f52 100644
12170 --- a/arch/x86/include/asm/stackprotector.h
12171 +++ b/arch/x86/include/asm/stackprotector.h
12172 @@ -48,7 +48,7 @@
12173 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12174 */
12175 #define GDT_STACK_CANARY_INIT \
12176 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12177 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12178
12179 /*
12180 * Initialize the stackprotector canary value.
12181 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12182
12183 static inline void load_stack_canary_segment(void)
12184 {
12185 -#ifdef CONFIG_X86_32
12186 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12187 asm volatile ("mov %0, %%gs" : : "r" (0));
12188 #endif
12189 }
12190 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12191 index e0fbf29..858ef4a 100644
12192 --- a/arch/x86/include/asm/system.h
12193 +++ b/arch/x86/include/asm/system.h
12194 @@ -132,7 +132,7 @@ do { \
12195 "thread_return:\n\t" \
12196 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12197 __switch_canary \
12198 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12199 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12200 "movq %%rax,%%rdi\n\t" \
12201 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12202 "jnz ret_from_fork\n\t" \
12203 @@ -143,7 +143,7 @@ do { \
12204 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12205 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12206 [_tif_fork] "i" (_TIF_FORK), \
12207 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12208 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
12209 [current_task] "m" (per_cpu_var(current_task)) \
12210 __switch_canary_iparam \
12211 : "memory", "cc" __EXTRA_CLOBBER)
12212 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12213 {
12214 unsigned long __limit;
12215 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12216 - return __limit + 1;
12217 + return __limit;
12218 }
12219
12220 static inline void native_clts(void)
12221 @@ -340,12 +340,12 @@ void enable_hlt(void);
12222
12223 void cpu_idle_wait(void);
12224
12225 -extern unsigned long arch_align_stack(unsigned long sp);
12226 +#define arch_align_stack(x) ((x) & ~0xfUL)
12227 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12228
12229 void default_idle(void);
12230
12231 -void stop_this_cpu(void *dummy);
12232 +void stop_this_cpu(void *dummy) __noreturn;
12233
12234 /*
12235 * Force strict CPU ordering.
12236 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12237 index 19c3ce4..8962535 100644
12238 --- a/arch/x86/include/asm/thread_info.h
12239 +++ b/arch/x86/include/asm/thread_info.h
12240 @@ -10,6 +10,7 @@
12241 #include <linux/compiler.h>
12242 #include <asm/page.h>
12243 #include <asm/types.h>
12244 +#include <asm/percpu.h>
12245
12246 /*
12247 * low level task data that entry.S needs immediate access to
12248 @@ -24,7 +25,6 @@ struct exec_domain;
12249 #include <asm/atomic.h>
12250
12251 struct thread_info {
12252 - struct task_struct *task; /* main task structure */
12253 struct exec_domain *exec_domain; /* execution domain */
12254 __u32 flags; /* low level flags */
12255 __u32 status; /* thread synchronous flags */
12256 @@ -34,18 +34,12 @@ struct thread_info {
12257 mm_segment_t addr_limit;
12258 struct restart_block restart_block;
12259 void __user *sysenter_return;
12260 -#ifdef CONFIG_X86_32
12261 - unsigned long previous_esp; /* ESP of the previous stack in
12262 - case of nested (IRQ) stacks
12263 - */
12264 - __u8 supervisor_stack[0];
12265 -#endif
12266 + unsigned long lowest_stack;
12267 int uaccess_err;
12268 };
12269
12270 -#define INIT_THREAD_INFO(tsk) \
12271 +#define INIT_THREAD_INFO \
12272 { \
12273 - .task = &tsk, \
12274 .exec_domain = &default_exec_domain, \
12275 .flags = 0, \
12276 .cpu = 0, \
12277 @@ -56,7 +50,7 @@ struct thread_info {
12278 }, \
12279 }
12280
12281 -#define init_thread_info (init_thread_union.thread_info)
12282 +#define init_thread_info (init_thread_union.stack)
12283 #define init_stack (init_thread_union.stack)
12284
12285 #else /* !__ASSEMBLY__ */
12286 @@ -163,45 +157,40 @@ struct thread_info {
12287 #define alloc_thread_info(tsk) \
12288 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12289
12290 -#ifdef CONFIG_X86_32
12291 -
12292 -#define STACK_WARN (THREAD_SIZE/8)
12293 -/*
12294 - * macros/functions for gaining access to the thread information structure
12295 - *
12296 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12297 - */
12298 -#ifndef __ASSEMBLY__
12299 -
12300 -
12301 -/* how to get the current stack pointer from C */
12302 -register unsigned long current_stack_pointer asm("esp") __used;
12303 -
12304 -/* how to get the thread information struct from C */
12305 -static inline struct thread_info *current_thread_info(void)
12306 -{
12307 - return (struct thread_info *)
12308 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12309 -}
12310 -
12311 -#else /* !__ASSEMBLY__ */
12312 -
12313 +#ifdef __ASSEMBLY__
12314 /* how to get the thread information struct from ASM */
12315 #define GET_THREAD_INFO(reg) \
12316 - movl $-THREAD_SIZE, reg; \
12317 - andl %esp, reg
12318 + mov PER_CPU_VAR(current_tinfo), reg
12319
12320 /* use this one if reg already contains %esp */
12321 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12322 - andl $-THREAD_SIZE, reg
12323 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12324 +#else
12325 +/* how to get the thread information struct from C */
12326 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12327 +
12328 +static __always_inline struct thread_info *current_thread_info(void)
12329 +{
12330 + return percpu_read_stable(current_tinfo);
12331 +}
12332 +#endif
12333 +
12334 +#ifdef CONFIG_X86_32
12335 +
12336 +#define STACK_WARN (THREAD_SIZE/8)
12337 +/*
12338 + * macros/functions for gaining access to the thread information structure
12339 + *
12340 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12341 + */
12342 +#ifndef __ASSEMBLY__
12343 +
12344 +/* how to get the current stack pointer from C */
12345 +register unsigned long current_stack_pointer asm("esp") __used;
12346
12347 #endif
12348
12349 #else /* X86_32 */
12350
12351 -#include <asm/percpu.h>
12352 -#define KERNEL_STACK_OFFSET (5*8)
12353 -
12354 /*
12355 * macros/functions for gaining access to the thread information structure
12356 * preempt_count needs to be 1 initially, until the scheduler is functional.
12357 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12358 #ifndef __ASSEMBLY__
12359 DECLARE_PER_CPU(unsigned long, kernel_stack);
12360
12361 -static inline struct thread_info *current_thread_info(void)
12362 -{
12363 - struct thread_info *ti;
12364 - ti = (void *)(percpu_read_stable(kernel_stack) +
12365 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12366 - return ti;
12367 -}
12368 -
12369 -#else /* !__ASSEMBLY__ */
12370 -
12371 -/* how to get the thread information struct from ASM */
12372 -#define GET_THREAD_INFO(reg) \
12373 - movq PER_CPU_VAR(kernel_stack),reg ; \
12374 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12375 -
12376 +/* how to get the current stack pointer from C */
12377 +register unsigned long current_stack_pointer asm("rsp") __used;
12378 #endif
12379
12380 #endif /* !X86_32 */
12381 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12382 extern void free_thread_info(struct thread_info *ti);
12383 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12384 #define arch_task_cache_init arch_task_cache_init
12385 +
12386 +#define __HAVE_THREAD_FUNCTIONS
12387 +#define task_thread_info(task) (&(task)->tinfo)
12388 +#define task_stack_page(task) ((task)->stack)
12389 +#define setup_thread_stack(p, org) do {} while (0)
12390 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12391 +
12392 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12393 +extern struct task_struct *alloc_task_struct(void);
12394 +extern void free_task_struct(struct task_struct *);
12395 +
12396 #endif
12397 #endif /* _ASM_X86_THREAD_INFO_H */
12398 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12399 index 61c5874..8a046e9 100644
12400 --- a/arch/x86/include/asm/uaccess.h
12401 +++ b/arch/x86/include/asm/uaccess.h
12402 @@ -8,12 +8,15 @@
12403 #include <linux/thread_info.h>
12404 #include <linux/prefetch.h>
12405 #include <linux/string.h>
12406 +#include <linux/sched.h>
12407 #include <asm/asm.h>
12408 #include <asm/page.h>
12409
12410 #define VERIFY_READ 0
12411 #define VERIFY_WRITE 1
12412
12413 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12414 +
12415 /*
12416 * The fs value determines whether argument validity checking should be
12417 * performed or not. If get_fs() == USER_DS, checking is performed, with
12418 @@ -29,7 +32,12 @@
12419
12420 #define get_ds() (KERNEL_DS)
12421 #define get_fs() (current_thread_info()->addr_limit)
12422 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12423 +void __set_fs(mm_segment_t x);
12424 +void set_fs(mm_segment_t x);
12425 +#else
12426 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12427 +#endif
12428
12429 #define segment_eq(a, b) ((a).seg == (b).seg)
12430
12431 @@ -77,7 +85,33 @@
12432 * checks that the pointer is in the user space range - after calling
12433 * this function, memory access functions may still return -EFAULT.
12434 */
12435 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12436 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12437 +#define access_ok(type, addr, size) \
12438 +({ \
12439 + long __size = size; \
12440 + unsigned long __addr = (unsigned long)addr; \
12441 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12442 + unsigned long __end_ao = __addr + __size - 1; \
12443 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12444 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12445 + while(__addr_ao <= __end_ao) { \
12446 + char __c_ao; \
12447 + __addr_ao += PAGE_SIZE; \
12448 + if (__size > PAGE_SIZE) \
12449 + cond_resched(); \
12450 + if (__get_user(__c_ao, (char __user *)__addr)) \
12451 + break; \
12452 + if (type != VERIFY_WRITE) { \
12453 + __addr = __addr_ao; \
12454 + continue; \
12455 + } \
12456 + if (__put_user(__c_ao, (char __user *)__addr)) \
12457 + break; \
12458 + __addr = __addr_ao; \
12459 + } \
12460 + } \
12461 + __ret_ao; \
12462 +})
12463
12464 /*
12465 * The exception table consists of pairs of addresses: the first is the
12466 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12467 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12468 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12469
12470 -
12471 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12472 +#define __copyuser_seg "gs;"
12473 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12474 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12475 +#else
12476 +#define __copyuser_seg
12477 +#define __COPYUSER_SET_ES
12478 +#define __COPYUSER_RESTORE_ES
12479 +#endif
12480
12481 #ifdef CONFIG_X86_32
12482 #define __put_user_asm_u64(x, addr, err, errret) \
12483 - asm volatile("1: movl %%eax,0(%2)\n" \
12484 - "2: movl %%edx,4(%2)\n" \
12485 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12486 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12487 "3:\n" \
12488 ".section .fixup,\"ax\"\n" \
12489 "4: movl %3,%0\n" \
12490 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12491 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12492
12493 #define __put_user_asm_ex_u64(x, addr) \
12494 - asm volatile("1: movl %%eax,0(%1)\n" \
12495 - "2: movl %%edx,4(%1)\n" \
12496 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12497 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12498 "3:\n" \
12499 _ASM_EXTABLE(1b, 2b - 1b) \
12500 _ASM_EXTABLE(2b, 3b - 2b) \
12501 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12502 __typeof__(*(ptr)) __pu_val; \
12503 __chk_user_ptr(ptr); \
12504 might_fault(); \
12505 - __pu_val = x; \
12506 + __pu_val = (x); \
12507 switch (sizeof(*(ptr))) { \
12508 case 1: \
12509 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12510 @@ -374,7 +416,7 @@ do { \
12511 } while (0)
12512
12513 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12514 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12515 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12516 "2:\n" \
12517 ".section .fixup,\"ax\"\n" \
12518 "3: mov %3,%0\n" \
12519 @@ -382,7 +424,7 @@ do { \
12520 " jmp 2b\n" \
12521 ".previous\n" \
12522 _ASM_EXTABLE(1b, 3b) \
12523 - : "=r" (err), ltype(x) \
12524 + : "=r" (err), ltype (x) \
12525 : "m" (__m(addr)), "i" (errret), "0" (err))
12526
12527 #define __get_user_size_ex(x, ptr, size) \
12528 @@ -407,7 +449,7 @@ do { \
12529 } while (0)
12530
12531 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12532 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12533 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12534 "2:\n" \
12535 _ASM_EXTABLE(1b, 2b - 1b) \
12536 : ltype(x) : "m" (__m(addr)))
12537 @@ -424,13 +466,24 @@ do { \
12538 int __gu_err; \
12539 unsigned long __gu_val; \
12540 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12541 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12542 + (x) = (__typeof__(*(ptr)))__gu_val; \
12543 __gu_err; \
12544 })
12545
12546 /* FIXME: this hack is definitely wrong -AK */
12547 struct __large_struct { unsigned long buf[100]; };
12548 -#define __m(x) (*(struct __large_struct __user *)(x))
12549 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12550 +#define ____m(x) \
12551 +({ \
12552 + unsigned long ____x = (unsigned long)(x); \
12553 + if (____x < PAX_USER_SHADOW_BASE) \
12554 + ____x += PAX_USER_SHADOW_BASE; \
12555 + (void __user *)____x; \
12556 +})
12557 +#else
12558 +#define ____m(x) (x)
12559 +#endif
12560 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12561
12562 /*
12563 * Tell gcc we read from memory instead of writing: this is because
12564 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12565 * aliasing issues.
12566 */
12567 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12568 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12569 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12570 "2:\n" \
12571 ".section .fixup,\"ax\"\n" \
12572 "3: mov %3,%0\n" \
12573 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12574 ".previous\n" \
12575 _ASM_EXTABLE(1b, 3b) \
12576 : "=r"(err) \
12577 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12578 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12579
12580 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12581 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12582 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12583 "2:\n" \
12584 _ASM_EXTABLE(1b, 2b - 1b) \
12585 : : ltype(x), "m" (__m(addr)))
12586 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12587 * On error, the variable @x is set to zero.
12588 */
12589
12590 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12591 +#define __get_user(x, ptr) get_user((x), (ptr))
12592 +#else
12593 #define __get_user(x, ptr) \
12594 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12595 +#endif
12596
12597 /**
12598 * __put_user: - Write a simple value into user space, with less checking.
12599 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12600 * Returns zero on success, or -EFAULT on error.
12601 */
12602
12603 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12604 +#define __put_user(x, ptr) put_user((x), (ptr))
12605 +#else
12606 #define __put_user(x, ptr) \
12607 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12608 +#endif
12609
12610 #define __get_user_unaligned __get_user
12611 #define __put_user_unaligned __put_user
12612 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12613 #define get_user_ex(x, ptr) do { \
12614 unsigned long __gue_val; \
12615 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12616 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12617 + (x) = (__typeof__(*(ptr)))__gue_val; \
12618 } while (0)
12619
12620 #ifdef CONFIG_X86_WP_WORKS_OK
12621 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12622
12623 #define ARCH_HAS_NOCACHE_UACCESS 1
12624
12625 +#define ARCH_HAS_SORT_EXTABLE
12626 #ifdef CONFIG_X86_32
12627 # include "uaccess_32.h"
12628 #else
12629 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12630 index 632fb44..e30e334 100644
12631 --- a/arch/x86/include/asm/uaccess_32.h
12632 +++ b/arch/x86/include/asm/uaccess_32.h
12633 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12634 static __always_inline unsigned long __must_check
12635 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12636 {
12637 + pax_track_stack();
12638 +
12639 + if ((long)n < 0)
12640 + return n;
12641 +
12642 if (__builtin_constant_p(n)) {
12643 unsigned long ret;
12644
12645 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12646 return ret;
12647 }
12648 }
12649 + if (!__builtin_constant_p(n))
12650 + check_object_size(from, n, true);
12651 return __copy_to_user_ll(to, from, n);
12652 }
12653
12654 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12655 __copy_to_user(void __user *to, const void *from, unsigned long n)
12656 {
12657 might_fault();
12658 +
12659 return __copy_to_user_inatomic(to, from, n);
12660 }
12661
12662 static __always_inline unsigned long
12663 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12664 {
12665 + if ((long)n < 0)
12666 + return n;
12667 +
12668 /* Avoid zeroing the tail if the copy fails..
12669 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12670 * but as the zeroing behaviour is only significant when n is not
12671 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12672 __copy_from_user(void *to, const void __user *from, unsigned long n)
12673 {
12674 might_fault();
12675 +
12676 + pax_track_stack();
12677 +
12678 + if ((long)n < 0)
12679 + return n;
12680 +
12681 if (__builtin_constant_p(n)) {
12682 unsigned long ret;
12683
12684 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12685 return ret;
12686 }
12687 }
12688 + if (!__builtin_constant_p(n))
12689 + check_object_size(to, n, false);
12690 return __copy_from_user_ll(to, from, n);
12691 }
12692
12693 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12694 const void __user *from, unsigned long n)
12695 {
12696 might_fault();
12697 +
12698 + if ((long)n < 0)
12699 + return n;
12700 +
12701 if (__builtin_constant_p(n)) {
12702 unsigned long ret;
12703
12704 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12705 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12706 unsigned long n)
12707 {
12708 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12709 + if ((long)n < 0)
12710 + return n;
12711 +
12712 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12713 +}
12714 +
12715 +/**
12716 + * copy_to_user: - Copy a block of data into user space.
12717 + * @to: Destination address, in user space.
12718 + * @from: Source address, in kernel space.
12719 + * @n: Number of bytes to copy.
12720 + *
12721 + * Context: User context only. This function may sleep.
12722 + *
12723 + * Copy data from kernel space to user space.
12724 + *
12725 + * Returns number of bytes that could not be copied.
12726 + * On success, this will be zero.
12727 + */
12728 +static __always_inline unsigned long __must_check
12729 +copy_to_user(void __user *to, const void *from, unsigned long n)
12730 +{
12731 + if (access_ok(VERIFY_WRITE, to, n))
12732 + n = __copy_to_user(to, from, n);
12733 + return n;
12734 +}
12735 +
12736 +/**
12737 + * copy_from_user: - Copy a block of data from user space.
12738 + * @to: Destination address, in kernel space.
12739 + * @from: Source address, in user space.
12740 + * @n: Number of bytes to copy.
12741 + *
12742 + * Context: User context only. This function may sleep.
12743 + *
12744 + * Copy data from user space to kernel space.
12745 + *
12746 + * Returns number of bytes that could not be copied.
12747 + * On success, this will be zero.
12748 + *
12749 + * If some data could not be copied, this function will pad the copied
12750 + * data to the requested size using zero bytes.
12751 + */
12752 +static __always_inline unsigned long __must_check
12753 +copy_from_user(void *to, const void __user *from, unsigned long n)
12754 +{
12755 + if (access_ok(VERIFY_READ, from, n))
12756 + n = __copy_from_user(to, from, n);
12757 + else if ((long)n > 0) {
12758 + if (!__builtin_constant_p(n))
12759 + check_object_size(to, n, false);
12760 + memset(to, 0, n);
12761 + }
12762 + return n;
12763 }
12764
12765 -unsigned long __must_check copy_to_user(void __user *to,
12766 - const void *from, unsigned long n);
12767 -unsigned long __must_check copy_from_user(void *to,
12768 - const void __user *from,
12769 - unsigned long n);
12770 long __must_check strncpy_from_user(char *dst, const char __user *src,
12771 long count);
12772 long __must_check __strncpy_from_user(char *dst,
12773 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12774 index db24b21..f595ae7 100644
12775 --- a/arch/x86/include/asm/uaccess_64.h
12776 +++ b/arch/x86/include/asm/uaccess_64.h
12777 @@ -9,6 +9,9 @@
12778 #include <linux/prefetch.h>
12779 #include <linux/lockdep.h>
12780 #include <asm/page.h>
12781 +#include <asm/pgtable.h>
12782 +
12783 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12784
12785 /*
12786 * Copy To/From Userspace
12787 @@ -16,116 +19,205 @@
12788
12789 /* Handles exceptions in both to and from, but doesn't do access_ok */
12790 __must_check unsigned long
12791 -copy_user_generic(void *to, const void *from, unsigned len);
12792 +copy_user_generic(void *to, const void *from, unsigned long len);
12793
12794 __must_check unsigned long
12795 -copy_to_user(void __user *to, const void *from, unsigned len);
12796 -__must_check unsigned long
12797 -copy_from_user(void *to, const void __user *from, unsigned len);
12798 -__must_check unsigned long
12799 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12800 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12801
12802 static __always_inline __must_check
12803 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12804 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12805 {
12806 - int ret = 0;
12807 + unsigned ret = 0;
12808
12809 might_fault();
12810 - if (!__builtin_constant_p(size))
12811 - return copy_user_generic(dst, (__force void *)src, size);
12812 +
12813 + if (size > INT_MAX)
12814 + return size;
12815 +
12816 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12817 + if (!__access_ok(VERIFY_READ, src, size))
12818 + return size;
12819 +#endif
12820 +
12821 + if (!__builtin_constant_p(size)) {
12822 + check_object_size(dst, size, false);
12823 +
12824 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12825 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12826 + src += PAX_USER_SHADOW_BASE;
12827 +#endif
12828 +
12829 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12830 + }
12831 switch (size) {
12832 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12833 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12834 ret, "b", "b", "=q", 1);
12835 return ret;
12836 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12837 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12838 ret, "w", "w", "=r", 2);
12839 return ret;
12840 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12841 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12842 ret, "l", "k", "=r", 4);
12843 return ret;
12844 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12845 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12846 ret, "q", "", "=r", 8);
12847 return ret;
12848 case 10:
12849 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12850 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12851 ret, "q", "", "=r", 10);
12852 if (unlikely(ret))
12853 return ret;
12854 __get_user_asm(*(u16 *)(8 + (char *)dst),
12855 - (u16 __user *)(8 + (char __user *)src),
12856 + (const u16 __user *)(8 + (const char __user *)src),
12857 ret, "w", "w", "=r", 2);
12858 return ret;
12859 case 16:
12860 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12861 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12862 ret, "q", "", "=r", 16);
12863 if (unlikely(ret))
12864 return ret;
12865 __get_user_asm(*(u64 *)(8 + (char *)dst),
12866 - (u64 __user *)(8 + (char __user *)src),
12867 + (const u64 __user *)(8 + (const char __user *)src),
12868 ret, "q", "", "=r", 8);
12869 return ret;
12870 default:
12871 - return copy_user_generic(dst, (__force void *)src, size);
12872 +
12873 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12874 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12875 + src += PAX_USER_SHADOW_BASE;
12876 +#endif
12877 +
12878 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12879 }
12880 }
12881
12882 static __always_inline __must_check
12883 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12884 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12885 {
12886 - int ret = 0;
12887 + unsigned ret = 0;
12888
12889 might_fault();
12890 - if (!__builtin_constant_p(size))
12891 - return copy_user_generic((__force void *)dst, src, size);
12892 +
12893 + pax_track_stack();
12894 +
12895 + if (size > INT_MAX)
12896 + return size;
12897 +
12898 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12899 + if (!__access_ok(VERIFY_WRITE, dst, size))
12900 + return size;
12901 +#endif
12902 +
12903 + if (!__builtin_constant_p(size)) {
12904 + check_object_size(src, size, true);
12905 +
12906 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12907 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12908 + dst += PAX_USER_SHADOW_BASE;
12909 +#endif
12910 +
12911 + return copy_user_generic((__force_kernel void *)dst, src, size);
12912 + }
12913 switch (size) {
12914 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12915 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12916 ret, "b", "b", "iq", 1);
12917 return ret;
12918 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12919 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12920 ret, "w", "w", "ir", 2);
12921 return ret;
12922 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12923 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12924 ret, "l", "k", "ir", 4);
12925 return ret;
12926 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12927 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12928 ret, "q", "", "er", 8);
12929 return ret;
12930 case 10:
12931 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12932 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12933 ret, "q", "", "er", 10);
12934 if (unlikely(ret))
12935 return ret;
12936 asm("":::"memory");
12937 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12938 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12939 ret, "w", "w", "ir", 2);
12940 return ret;
12941 case 16:
12942 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12943 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12944 ret, "q", "", "er", 16);
12945 if (unlikely(ret))
12946 return ret;
12947 asm("":::"memory");
12948 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12949 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12950 ret, "q", "", "er", 8);
12951 return ret;
12952 default:
12953 - return copy_user_generic((__force void *)dst, src, size);
12954 +
12955 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12956 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12957 + dst += PAX_USER_SHADOW_BASE;
12958 +#endif
12959 +
12960 + return copy_user_generic((__force_kernel void *)dst, src, size);
12961 + }
12962 +}
12963 +
12964 +static __always_inline __must_check
12965 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12966 +{
12967 + if (access_ok(VERIFY_WRITE, to, len))
12968 + len = __copy_to_user(to, from, len);
12969 + return len;
12970 +}
12971 +
12972 +static __always_inline __must_check
12973 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12974 +{
12975 + might_fault();
12976 +
12977 + if (access_ok(VERIFY_READ, from, len))
12978 + len = __copy_from_user(to, from, len);
12979 + else if (len < INT_MAX) {
12980 + if (!__builtin_constant_p(len))
12981 + check_object_size(to, len, false);
12982 + memset(to, 0, len);
12983 }
12984 + return len;
12985 }
12986
12987 static __always_inline __must_check
12988 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12989 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12990 {
12991 - int ret = 0;
12992 + unsigned ret = 0;
12993
12994 might_fault();
12995 - if (!__builtin_constant_p(size))
12996 - return copy_user_generic((__force void *)dst,
12997 - (__force void *)src, size);
12998 +
12999 + pax_track_stack();
13000 +
13001 + if (size > INT_MAX)
13002 + return size;
13003 +
13004 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13005 + if (!__access_ok(VERIFY_READ, src, size))
13006 + return size;
13007 + if (!__access_ok(VERIFY_WRITE, dst, size))
13008 + return size;
13009 +#endif
13010 +
13011 + if (!__builtin_constant_p(size)) {
13012 +
13013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13014 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13015 + src += PAX_USER_SHADOW_BASE;
13016 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13017 + dst += PAX_USER_SHADOW_BASE;
13018 +#endif
13019 +
13020 + return copy_user_generic((__force_kernel void *)dst,
13021 + (__force_kernel const void *)src, size);
13022 + }
13023 switch (size) {
13024 case 1: {
13025 u8 tmp;
13026 - __get_user_asm(tmp, (u8 __user *)src,
13027 + __get_user_asm(tmp, (const u8 __user *)src,
13028 ret, "b", "b", "=q", 1);
13029 if (likely(!ret))
13030 __put_user_asm(tmp, (u8 __user *)dst,
13031 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13032 }
13033 case 2: {
13034 u16 tmp;
13035 - __get_user_asm(tmp, (u16 __user *)src,
13036 + __get_user_asm(tmp, (const u16 __user *)src,
13037 ret, "w", "w", "=r", 2);
13038 if (likely(!ret))
13039 __put_user_asm(tmp, (u16 __user *)dst,
13040 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13041
13042 case 4: {
13043 u32 tmp;
13044 - __get_user_asm(tmp, (u32 __user *)src,
13045 + __get_user_asm(tmp, (const u32 __user *)src,
13046 ret, "l", "k", "=r", 4);
13047 if (likely(!ret))
13048 __put_user_asm(tmp, (u32 __user *)dst,
13049 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13050 }
13051 case 8: {
13052 u64 tmp;
13053 - __get_user_asm(tmp, (u64 __user *)src,
13054 + __get_user_asm(tmp, (const u64 __user *)src,
13055 ret, "q", "", "=r", 8);
13056 if (likely(!ret))
13057 __put_user_asm(tmp, (u64 __user *)dst,
13058 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13059 return ret;
13060 }
13061 default:
13062 - return copy_user_generic((__force void *)dst,
13063 - (__force void *)src, size);
13064 +
13065 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13066 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13067 + src += PAX_USER_SHADOW_BASE;
13068 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13069 + dst += PAX_USER_SHADOW_BASE;
13070 +#endif
13071 +
13072 + return copy_user_generic((__force_kernel void *)dst,
13073 + (__force_kernel const void *)src, size);
13074 }
13075 }
13076
13077 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13078 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13079 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13080
13081 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13082 - unsigned size);
13083 +static __must_check __always_inline unsigned long
13084 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13085 +{
13086 + pax_track_stack();
13087 +
13088 + if (size > INT_MAX)
13089 + return size;
13090 +
13091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13092 + if (!__access_ok(VERIFY_READ, src, size))
13093 + return size;
13094
13095 -static __must_check __always_inline int
13096 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13097 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13098 + src += PAX_USER_SHADOW_BASE;
13099 +#endif
13100 +
13101 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13102 +}
13103 +
13104 +static __must_check __always_inline unsigned long
13105 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13106 {
13107 - return copy_user_generic((__force void *)dst, src, size);
13108 + if (size > INT_MAX)
13109 + return size;
13110 +
13111 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13112 + if (!__access_ok(VERIFY_WRITE, dst, size))
13113 + return size;
13114 +
13115 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13116 + dst += PAX_USER_SHADOW_BASE;
13117 +#endif
13118 +
13119 + return copy_user_generic((__force_kernel void *)dst, src, size);
13120 }
13121
13122 -extern long __copy_user_nocache(void *dst, const void __user *src,
13123 - unsigned size, int zerorest);
13124 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13125 + unsigned long size, int zerorest);
13126
13127 -static inline int
13128 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13129 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13130 {
13131 might_sleep();
13132 +
13133 + if (size > INT_MAX)
13134 + return size;
13135 +
13136 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13137 + if (!__access_ok(VERIFY_READ, src, size))
13138 + return size;
13139 +#endif
13140 +
13141 return __copy_user_nocache(dst, src, size, 1);
13142 }
13143
13144 -static inline int
13145 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13146 - unsigned size)
13147 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13148 + unsigned long size)
13149 {
13150 + if (size > INT_MAX)
13151 + return size;
13152 +
13153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13154 + if (!__access_ok(VERIFY_READ, src, size))
13155 + return size;
13156 +#endif
13157 +
13158 return __copy_user_nocache(dst, src, size, 0);
13159 }
13160
13161 -unsigned long
13162 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13163 +extern unsigned long
13164 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13165
13166 #endif /* _ASM_X86_UACCESS_64_H */
13167 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13168 index 9064052..786cfbc 100644
13169 --- a/arch/x86/include/asm/vdso.h
13170 +++ b/arch/x86/include/asm/vdso.h
13171 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13172 #define VDSO32_SYMBOL(base, name) \
13173 ({ \
13174 extern const char VDSO32_##name[]; \
13175 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13176 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13177 })
13178 #endif
13179
13180 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13181 index 3d61e20..9507180 100644
13182 --- a/arch/x86/include/asm/vgtod.h
13183 +++ b/arch/x86/include/asm/vgtod.h
13184 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13185 int sysctl_enabled;
13186 struct timezone sys_tz;
13187 struct { /* extract of a clocksource struct */
13188 + char name[8];
13189 cycle_t (*vread)(void);
13190 cycle_t cycle_last;
13191 cycle_t mask;
13192 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13193 index 61e08c0..b0da582 100644
13194 --- a/arch/x86/include/asm/vmi.h
13195 +++ b/arch/x86/include/asm/vmi.h
13196 @@ -191,6 +191,7 @@ struct vrom_header {
13197 u8 reserved[96]; /* Reserved for headers */
13198 char vmi_init[8]; /* VMI_Init jump point */
13199 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13200 + char rom_data[8048]; /* rest of the option ROM */
13201 } __attribute__((packed));
13202
13203 struct pnp_header {
13204 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13205 index c6e0bee..fcb9f74 100644
13206 --- a/arch/x86/include/asm/vmi_time.h
13207 +++ b/arch/x86/include/asm/vmi_time.h
13208 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13209 int (*wallclock_updated)(void);
13210 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13211 void (*cancel_alarm)(u32 flags);
13212 -} vmi_timer_ops;
13213 +} __no_const vmi_timer_ops;
13214
13215 /* Prototypes */
13216 extern void __init vmi_time_init(void);
13217 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13218 index d0983d2..1f7c9e9 100644
13219 --- a/arch/x86/include/asm/vsyscall.h
13220 +++ b/arch/x86/include/asm/vsyscall.h
13221 @@ -15,9 +15,10 @@ enum vsyscall_num {
13222
13223 #ifdef __KERNEL__
13224 #include <linux/seqlock.h>
13225 +#include <linux/getcpu.h>
13226 +#include <linux/time.h>
13227
13228 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13229 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13230
13231 /* Definitions for CONFIG_GENERIC_TIME definitions */
13232 #define __section_vsyscall_gtod_data __attribute__ \
13233 @@ -31,7 +32,6 @@ enum vsyscall_num {
13234 #define VGETCPU_LSL 2
13235
13236 extern int __vgetcpu_mode;
13237 -extern volatile unsigned long __jiffies;
13238
13239 /* kernel space (writeable) */
13240 extern int vgetcpu_mode;
13241 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13242
13243 extern void map_vsyscall(void);
13244
13245 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13246 +extern time_t vtime(time_t *t);
13247 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13248 #endif /* __KERNEL__ */
13249
13250 #endif /* _ASM_X86_VSYSCALL_H */
13251 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13252 index 2c756fd..3377e37 100644
13253 --- a/arch/x86/include/asm/x86_init.h
13254 +++ b/arch/x86/include/asm/x86_init.h
13255 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13256 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13257 void (*find_smp_config)(unsigned int reserve);
13258 void (*get_smp_config)(unsigned int early);
13259 -};
13260 +} __no_const;
13261
13262 /**
13263 * struct x86_init_resources - platform specific resource related ops
13264 @@ -42,7 +42,7 @@ struct x86_init_resources {
13265 void (*probe_roms)(void);
13266 void (*reserve_resources)(void);
13267 char *(*memory_setup)(void);
13268 -};
13269 +} __no_const;
13270
13271 /**
13272 * struct x86_init_irqs - platform specific interrupt setup
13273 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13274 void (*pre_vector_init)(void);
13275 void (*intr_init)(void);
13276 void (*trap_init)(void);
13277 -};
13278 +} __no_const;
13279
13280 /**
13281 * struct x86_init_oem - oem platform specific customizing functions
13282 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13283 struct x86_init_oem {
13284 void (*arch_setup)(void);
13285 void (*banner)(void);
13286 -};
13287 +} __no_const;
13288
13289 /**
13290 * struct x86_init_paging - platform specific paging functions
13291 @@ -75,7 +75,7 @@ struct x86_init_oem {
13292 struct x86_init_paging {
13293 void (*pagetable_setup_start)(pgd_t *base);
13294 void (*pagetable_setup_done)(pgd_t *base);
13295 -};
13296 +} __no_const;
13297
13298 /**
13299 * struct x86_init_timers - platform specific timer setup
13300 @@ -88,7 +88,7 @@ struct x86_init_timers {
13301 void (*setup_percpu_clockev)(void);
13302 void (*tsc_pre_init)(void);
13303 void (*timer_init)(void);
13304 -};
13305 +} __no_const;
13306
13307 /**
13308 * struct x86_init_ops - functions for platform specific setup
13309 @@ -101,7 +101,7 @@ struct x86_init_ops {
13310 struct x86_init_oem oem;
13311 struct x86_init_paging paging;
13312 struct x86_init_timers timers;
13313 -};
13314 +} __no_const;
13315
13316 /**
13317 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13318 @@ -109,7 +109,7 @@ struct x86_init_ops {
13319 */
13320 struct x86_cpuinit_ops {
13321 void (*setup_percpu_clockev)(void);
13322 -};
13323 +} __no_const;
13324
13325 /**
13326 * struct x86_platform_ops - platform specific runtime functions
13327 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13328 unsigned long (*calibrate_tsc)(void);
13329 unsigned long (*get_wallclock)(void);
13330 int (*set_wallclock)(unsigned long nowtime);
13331 -};
13332 +} __no_const;
13333
13334 extern struct x86_init_ops x86_init;
13335 extern struct x86_cpuinit_ops x86_cpuinit;
13336 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13337 index 727acc1..554f3eb 100644
13338 --- a/arch/x86/include/asm/xsave.h
13339 +++ b/arch/x86/include/asm/xsave.h
13340 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13341 static inline int xsave_user(struct xsave_struct __user *buf)
13342 {
13343 int err;
13344 +
13345 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13346 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13347 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13348 +#endif
13349 +
13350 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13351 "2:\n"
13352 ".section .fixup,\"ax\"\n"
13353 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13354 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13355 {
13356 int err;
13357 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13358 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13359 u32 lmask = mask;
13360 u32 hmask = mask >> 32;
13361
13362 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13363 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13364 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13365 +#endif
13366 +
13367 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13368 "2:\n"
13369 ".section .fixup,\"ax\"\n"
13370 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13371 index 6a564ac..9b1340c 100644
13372 --- a/arch/x86/kernel/acpi/realmode/Makefile
13373 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13374 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13375 $(call cc-option, -fno-stack-protector) \
13376 $(call cc-option, -mpreferred-stack-boundary=2)
13377 KBUILD_CFLAGS += $(call cc-option, -m32)
13378 +ifdef CONSTIFY_PLUGIN
13379 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13380 +endif
13381 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13382 GCOV_PROFILE := n
13383
13384 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13385 index 580b4e2..d4129e4 100644
13386 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13387 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13388 @@ -91,6 +91,9 @@ _start:
13389 /* Do any other stuff... */
13390
13391 #ifndef CONFIG_64BIT
13392 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13393 + call verify_cpu
13394 +
13395 /* This could also be done in C code... */
13396 movl pmode_cr3, %eax
13397 movl %eax, %cr3
13398 @@ -104,7 +107,7 @@ _start:
13399 movl %eax, %ecx
13400 orl %edx, %ecx
13401 jz 1f
13402 - movl $0xc0000080, %ecx
13403 + mov $MSR_EFER, %ecx
13404 wrmsr
13405 1:
13406
13407 @@ -114,6 +117,7 @@ _start:
13408 movl pmode_cr0, %eax
13409 movl %eax, %cr0
13410 jmp pmode_return
13411 +# include "../../verify_cpu.S"
13412 #else
13413 pushw $0
13414 pushw trampoline_segment
13415 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13416 index ca93638..7042f24 100644
13417 --- a/arch/x86/kernel/acpi/sleep.c
13418 +++ b/arch/x86/kernel/acpi/sleep.c
13419 @@ -11,11 +11,12 @@
13420 #include <linux/cpumask.h>
13421 #include <asm/segment.h>
13422 #include <asm/desc.h>
13423 +#include <asm/e820.h>
13424
13425 #include "realmode/wakeup.h"
13426 #include "sleep.h"
13427
13428 -unsigned long acpi_wakeup_address;
13429 +unsigned long acpi_wakeup_address = 0x2000;
13430 unsigned long acpi_realmode_flags;
13431
13432 /* address in low memory of the wakeup routine. */
13433 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13434 #else /* CONFIG_64BIT */
13435 header->trampoline_segment = setup_trampoline() >> 4;
13436 #ifdef CONFIG_SMP
13437 - stack_start.sp = temp_stack + sizeof(temp_stack);
13438 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13439 +
13440 + pax_open_kernel();
13441 early_gdt_descr.address =
13442 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13443 + pax_close_kernel();
13444 +
13445 initial_gs = per_cpu_offset(smp_processor_id());
13446 #endif
13447 initial_code = (unsigned long)wakeup_long64;
13448 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13449 return;
13450 }
13451
13452 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13453 -
13454 - if (!acpi_realmode) {
13455 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13456 - return;
13457 - }
13458 -
13459 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13460 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13461 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13462 }
13463
13464
13465 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13466 index 8ded418..079961e 100644
13467 --- a/arch/x86/kernel/acpi/wakeup_32.S
13468 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13469 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13470 # and restore the stack ... but you need gdt for this to work
13471 movl saved_context_esp, %esp
13472
13473 - movl %cs:saved_magic, %eax
13474 - cmpl $0x12345678, %eax
13475 + cmpl $0x12345678, saved_magic
13476 jne bogus_magic
13477
13478 # jump to place where we left off
13479 - movl saved_eip, %eax
13480 - jmp *%eax
13481 + jmp *(saved_eip)
13482
13483 bogus_magic:
13484 jmp bogus_magic
13485 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13486 index de7353c..075da5f 100644
13487 --- a/arch/x86/kernel/alternative.c
13488 +++ b/arch/x86/kernel/alternative.c
13489 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13490
13491 BUG_ON(p->len > MAX_PATCH_LEN);
13492 /* prep the buffer with the original instructions */
13493 - memcpy(insnbuf, p->instr, p->len);
13494 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13495 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13496 (unsigned long)p->instr, p->len);
13497
13498 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13499 if (smp_alt_once)
13500 free_init_pages("SMP alternatives",
13501 (unsigned long)__smp_locks,
13502 - (unsigned long)__smp_locks_end);
13503 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13504
13505 restart_nmi();
13506 }
13507 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13508 * instructions. And on the local CPU you need to be protected again NMI or MCE
13509 * handlers seeing an inconsistent instruction while you patch.
13510 */
13511 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13512 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13513 size_t len)
13514 {
13515 unsigned long flags;
13516 local_irq_save(flags);
13517 - memcpy(addr, opcode, len);
13518 +
13519 + pax_open_kernel();
13520 + memcpy(ktla_ktva(addr), opcode, len);
13521 sync_core();
13522 + pax_close_kernel();
13523 +
13524 local_irq_restore(flags);
13525 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13526 that causes hangs on some VIA CPUs. */
13527 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13528 */
13529 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13530 {
13531 - unsigned long flags;
13532 - char *vaddr;
13533 + unsigned char *vaddr = ktla_ktva(addr);
13534 struct page *pages[2];
13535 - int i;
13536 + size_t i;
13537
13538 if (!core_kernel_text((unsigned long)addr)) {
13539 - pages[0] = vmalloc_to_page(addr);
13540 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13541 + pages[0] = vmalloc_to_page(vaddr);
13542 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13543 } else {
13544 - pages[0] = virt_to_page(addr);
13545 + pages[0] = virt_to_page(vaddr);
13546 WARN_ON(!PageReserved(pages[0]));
13547 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13548 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13549 }
13550 BUG_ON(!pages[0]);
13551 - local_irq_save(flags);
13552 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13553 - if (pages[1])
13554 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13555 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13556 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13557 - clear_fixmap(FIX_TEXT_POKE0);
13558 - if (pages[1])
13559 - clear_fixmap(FIX_TEXT_POKE1);
13560 - local_flush_tlb();
13561 - sync_core();
13562 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13563 - that causes hangs on some VIA CPUs. */
13564 + text_poke_early(addr, opcode, len);
13565 for (i = 0; i < len; i++)
13566 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13567 - local_irq_restore(flags);
13568 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13569 return addr;
13570 }
13571 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13572 index 3a44b75..1601800 100644
13573 --- a/arch/x86/kernel/amd_iommu.c
13574 +++ b/arch/x86/kernel/amd_iommu.c
13575 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13576 }
13577 }
13578
13579 -static struct dma_map_ops amd_iommu_dma_ops = {
13580 +static const struct dma_map_ops amd_iommu_dma_ops = {
13581 .alloc_coherent = alloc_coherent,
13582 .free_coherent = free_coherent,
13583 .map_page = map_page,
13584 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13585 index 1d2d670..8e3f477 100644
13586 --- a/arch/x86/kernel/apic/apic.c
13587 +++ b/arch/x86/kernel/apic/apic.c
13588 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13589 /*
13590 * Debug level, exported for io_apic.c
13591 */
13592 -unsigned int apic_verbosity;
13593 +int apic_verbosity;
13594
13595 int pic_mode;
13596
13597 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13598 apic_write(APIC_ESR, 0);
13599 v1 = apic_read(APIC_ESR);
13600 ack_APIC_irq();
13601 - atomic_inc(&irq_err_count);
13602 + atomic_inc_unchecked(&irq_err_count);
13603
13604 /*
13605 * Here is what the APIC error bits mean:
13606 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13607 u16 *bios_cpu_apicid;
13608 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13609
13610 + pax_track_stack();
13611 +
13612 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13613 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13614
13615 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13616 index 8928d97..f799cea 100644
13617 --- a/arch/x86/kernel/apic/io_apic.c
13618 +++ b/arch/x86/kernel/apic/io_apic.c
13619 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13620 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13621 GFP_ATOMIC);
13622 if (!ioapic_entries)
13623 - return 0;
13624 + return NULL;
13625
13626 for (apic = 0; apic < nr_ioapics; apic++) {
13627 ioapic_entries[apic] =
13628 @@ -733,7 +733,7 @@ nomem:
13629 kfree(ioapic_entries[apic]);
13630 kfree(ioapic_entries);
13631
13632 - return 0;
13633 + return NULL;
13634 }
13635
13636 /*
13637 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13638 }
13639 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13640
13641 -void lock_vector_lock(void)
13642 +void lock_vector_lock(void) __acquires(vector_lock)
13643 {
13644 /* Used to the online set of cpus does not change
13645 * during assign_irq_vector.
13646 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13647 spin_lock(&vector_lock);
13648 }
13649
13650 -void unlock_vector_lock(void)
13651 +void unlock_vector_lock(void) __releases(vector_lock)
13652 {
13653 spin_unlock(&vector_lock);
13654 }
13655 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13656 ack_APIC_irq();
13657 }
13658
13659 -atomic_t irq_mis_count;
13660 +atomic_unchecked_t irq_mis_count;
13661
13662 static void ack_apic_level(unsigned int irq)
13663 {
13664 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13665
13666 /* Tail end of version 0x11 I/O APIC bug workaround */
13667 if (!(v & (1 << (i & 0x1f)))) {
13668 - atomic_inc(&irq_mis_count);
13669 + atomic_inc_unchecked(&irq_mis_count);
13670 spin_lock(&ioapic_lock);
13671 __mask_and_edge_IO_APIC_irq(cfg);
13672 __unmask_and_level_IO_APIC_irq(cfg);
13673 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13674 index 151ace6..f317474 100644
13675 --- a/arch/x86/kernel/apm_32.c
13676 +++ b/arch/x86/kernel/apm_32.c
13677 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13678 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13679 * even though they are called in protected mode.
13680 */
13681 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13682 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13683 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13684
13685 static const char driver_version[] = "1.16ac"; /* no spaces */
13686 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13687 BUG_ON(cpu != 0);
13688 gdt = get_cpu_gdt_table(cpu);
13689 save_desc_40 = gdt[0x40 / 8];
13690 +
13691 + pax_open_kernel();
13692 gdt[0x40 / 8] = bad_bios_desc;
13693 + pax_close_kernel();
13694
13695 apm_irq_save(flags);
13696 APM_DO_SAVE_SEGS;
13697 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13698 &call->esi);
13699 APM_DO_RESTORE_SEGS;
13700 apm_irq_restore(flags);
13701 +
13702 + pax_open_kernel();
13703 gdt[0x40 / 8] = save_desc_40;
13704 + pax_close_kernel();
13705 +
13706 put_cpu();
13707
13708 return call->eax & 0xff;
13709 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13710 BUG_ON(cpu != 0);
13711 gdt = get_cpu_gdt_table(cpu);
13712 save_desc_40 = gdt[0x40 / 8];
13713 +
13714 + pax_open_kernel();
13715 gdt[0x40 / 8] = bad_bios_desc;
13716 + pax_close_kernel();
13717
13718 apm_irq_save(flags);
13719 APM_DO_SAVE_SEGS;
13720 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13721 &call->eax);
13722 APM_DO_RESTORE_SEGS;
13723 apm_irq_restore(flags);
13724 +
13725 + pax_open_kernel();
13726 gdt[0x40 / 8] = save_desc_40;
13727 + pax_close_kernel();
13728 +
13729 put_cpu();
13730 return error;
13731 }
13732 @@ -975,7 +989,7 @@ recalc:
13733
13734 static void apm_power_off(void)
13735 {
13736 - unsigned char po_bios_call[] = {
13737 + const unsigned char po_bios_call[] = {
13738 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13739 0x8e, 0xd0, /* movw ax,ss */
13740 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13741 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13742 * code to that CPU.
13743 */
13744 gdt = get_cpu_gdt_table(0);
13745 +
13746 + pax_open_kernel();
13747 set_desc_base(&gdt[APM_CS >> 3],
13748 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13749 set_desc_base(&gdt[APM_CS_16 >> 3],
13750 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13751 set_desc_base(&gdt[APM_DS >> 3],
13752 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13753 + pax_close_kernel();
13754
13755 proc_create("apm", 0, NULL, &apm_file_ops);
13756
13757 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13758 index dfdbf64..9b2b6ce 100644
13759 --- a/arch/x86/kernel/asm-offsets_32.c
13760 +++ b/arch/x86/kernel/asm-offsets_32.c
13761 @@ -51,7 +51,6 @@ void foo(void)
13762 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13763 BLANK();
13764
13765 - OFFSET(TI_task, thread_info, task);
13766 OFFSET(TI_exec_domain, thread_info, exec_domain);
13767 OFFSET(TI_flags, thread_info, flags);
13768 OFFSET(TI_status, thread_info, status);
13769 @@ -60,6 +59,8 @@ void foo(void)
13770 OFFSET(TI_restart_block, thread_info, restart_block);
13771 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13772 OFFSET(TI_cpu, thread_info, cpu);
13773 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13774 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13775 BLANK();
13776
13777 OFFSET(GDS_size, desc_ptr, size);
13778 @@ -99,6 +100,7 @@ void foo(void)
13779
13780 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13781 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13782 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13783 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13784 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13785 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13786 @@ -115,6 +117,11 @@ void foo(void)
13787 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13788 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13789 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13790 +
13791 +#ifdef CONFIG_PAX_KERNEXEC
13792 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13793 +#endif
13794 +
13795 #endif
13796
13797 #ifdef CONFIG_XEN
13798 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13799 index 4a6aeed..371de20 100644
13800 --- a/arch/x86/kernel/asm-offsets_64.c
13801 +++ b/arch/x86/kernel/asm-offsets_64.c
13802 @@ -44,6 +44,8 @@ int main(void)
13803 ENTRY(addr_limit);
13804 ENTRY(preempt_count);
13805 ENTRY(status);
13806 + ENTRY(lowest_stack);
13807 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13808 #ifdef CONFIG_IA32_EMULATION
13809 ENTRY(sysenter_return);
13810 #endif
13811 @@ -63,6 +65,18 @@ int main(void)
13812 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13813 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13814 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13815 +
13816 +#ifdef CONFIG_PAX_KERNEXEC
13817 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13818 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13819 +#endif
13820 +
13821 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13822 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13823 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13824 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13825 +#endif
13826 +
13827 #endif
13828
13829
13830 @@ -115,6 +129,7 @@ int main(void)
13831 ENTRY(cr8);
13832 BLANK();
13833 #undef ENTRY
13834 + DEFINE(TSS_size, sizeof(struct tss_struct));
13835 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13836 BLANK();
13837 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13838 @@ -130,6 +145,7 @@ int main(void)
13839
13840 BLANK();
13841 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13842 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13843 #ifdef CONFIG_XEN
13844 BLANK();
13845 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13846 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13847 index ff502cc..dc5133e 100644
13848 --- a/arch/x86/kernel/cpu/Makefile
13849 +++ b/arch/x86/kernel/cpu/Makefile
13850 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13851 CFLAGS_REMOVE_common.o = -pg
13852 endif
13853
13854 -# Make sure load_percpu_segment has no stackprotector
13855 -nostackp := $(call cc-option, -fno-stack-protector)
13856 -CFLAGS_common.o := $(nostackp)
13857 -
13858 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13859 obj-y += proc.o capflags.o powerflags.o common.o
13860 obj-y += vmware.o hypervisor.o sched.o
13861 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13862 index 6e082dc..a0b5f36 100644
13863 --- a/arch/x86/kernel/cpu/amd.c
13864 +++ b/arch/x86/kernel/cpu/amd.c
13865 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13866 unsigned int size)
13867 {
13868 /* AMD errata T13 (order #21922) */
13869 - if ((c->x86 == 6)) {
13870 + if (c->x86 == 6) {
13871 /* Duron Rev A0 */
13872 if (c->x86_model == 3 && c->x86_mask == 0)
13873 size = 64;
13874 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13875 index 4e34d10..ba6bc97 100644
13876 --- a/arch/x86/kernel/cpu/common.c
13877 +++ b/arch/x86/kernel/cpu/common.c
13878 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13879
13880 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13881
13882 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13883 -#ifdef CONFIG_X86_64
13884 - /*
13885 - * We need valid kernel segments for data and code in long mode too
13886 - * IRET will check the segment types kkeil 2000/10/28
13887 - * Also sysret mandates a special GDT layout
13888 - *
13889 - * TLS descriptors are currently at a different place compared to i386.
13890 - * Hopefully nobody expects them at a fixed place (Wine?)
13891 - */
13892 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13893 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13894 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13895 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13896 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13897 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13898 -#else
13899 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13900 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13901 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13902 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13903 - /*
13904 - * Segments used for calling PnP BIOS have byte granularity.
13905 - * They code segments and data segments have fixed 64k limits,
13906 - * the transfer segment sizes are set at run time.
13907 - */
13908 - /* 32-bit code */
13909 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13910 - /* 16-bit code */
13911 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13912 - /* 16-bit data */
13913 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13914 - /* 16-bit data */
13915 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13916 - /* 16-bit data */
13917 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13918 - /*
13919 - * The APM segments have byte granularity and their bases
13920 - * are set at run time. All have 64k limits.
13921 - */
13922 - /* 32-bit code */
13923 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13924 - /* 16-bit code */
13925 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13926 - /* data */
13927 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13928 -
13929 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13930 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13931 - GDT_STACK_CANARY_INIT
13932 -#endif
13933 -} };
13934 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13935 -
13936 static int __init x86_xsave_setup(char *s)
13937 {
13938 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13939 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13940 {
13941 struct desc_ptr gdt_descr;
13942
13943 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13944 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13945 gdt_descr.size = GDT_SIZE - 1;
13946 load_gdt(&gdt_descr);
13947 /* Reload the per-cpu base */
13948 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13949 /* Filter out anything that depends on CPUID levels we don't have */
13950 filter_cpuid_features(c, true);
13951
13952 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13953 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13954 +#endif
13955 +
13956 /* If the model name is still unset, do table lookup. */
13957 if (!c->x86_model_id[0]) {
13958 const char *p;
13959 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13960 }
13961 __setup("clearcpuid=", setup_disablecpuid);
13962
13963 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13964 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13965 +
13966 #ifdef CONFIG_X86_64
13967 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13968
13969 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13970 EXPORT_PER_CPU_SYMBOL(current_task);
13971
13972 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13973 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13974 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13975 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13976
13977 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13978 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13979 {
13980 memset(regs, 0, sizeof(struct pt_regs));
13981 regs->fs = __KERNEL_PERCPU;
13982 - regs->gs = __KERNEL_STACK_CANARY;
13983 + savesegment(gs, regs->gs);
13984
13985 return regs;
13986 }
13987 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13988 int i;
13989
13990 cpu = stack_smp_processor_id();
13991 - t = &per_cpu(init_tss, cpu);
13992 + t = init_tss + cpu;
13993 orig_ist = &per_cpu(orig_ist, cpu);
13994
13995 #ifdef CONFIG_NUMA
13996 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13997 switch_to_new_gdt(cpu);
13998 loadsegment(fs, 0);
13999
14000 - load_idt((const struct desc_ptr *)&idt_descr);
14001 + load_idt(&idt_descr);
14002
14003 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14004 syscall_init();
14005 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
14006 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14007 barrier();
14008
14009 - check_efer();
14010 if (cpu != 0)
14011 enable_x2apic();
14012
14013 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
14014 {
14015 int cpu = smp_processor_id();
14016 struct task_struct *curr = current;
14017 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14018 + struct tss_struct *t = init_tss + cpu;
14019 struct thread_struct *thread = &curr->thread;
14020
14021 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14022 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14023 index 6a77cca..4f4fca0 100644
14024 --- a/arch/x86/kernel/cpu/intel.c
14025 +++ b/arch/x86/kernel/cpu/intel.c
14026 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14027 * Update the IDT descriptor and reload the IDT so that
14028 * it uses the read-only mapped virtual address.
14029 */
14030 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14031 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14032 load_idt(&idt_descr);
14033 }
14034 #endif
14035 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14036 index 417990f..96dc36b 100644
14037 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14038 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14039 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14040 return ret;
14041 }
14042
14043 -static struct sysfs_ops sysfs_ops = {
14044 +static const struct sysfs_ops sysfs_ops = {
14045 .show = show,
14046 .store = store,
14047 };
14048 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14049 index 472763d..9831e11 100644
14050 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14051 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14052 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14053 static int inject_init(void)
14054 {
14055 printk(KERN_INFO "Machine check injector initialized\n");
14056 - mce_chrdev_ops.write = mce_write;
14057 + pax_open_kernel();
14058 + *(void **)&mce_chrdev_ops.write = mce_write;
14059 + pax_close_kernel();
14060 register_die_notifier(&mce_raise_nb);
14061 return 0;
14062 }
14063 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14064 index 0f16a2b..21740f5 100644
14065 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14066 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14067 @@ -43,6 +43,7 @@
14068 #include <asm/ipi.h>
14069 #include <asm/mce.h>
14070 #include <asm/msr.h>
14071 +#include <asm/local.h>
14072
14073 #include "mce-internal.h"
14074
14075 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14076 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14077 m->cs, m->ip);
14078
14079 - if (m->cs == __KERNEL_CS)
14080 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14081 print_symbol("{%s}", m->ip);
14082 pr_cont("\n");
14083 }
14084 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
14085
14086 #define PANIC_TIMEOUT 5 /* 5 seconds */
14087
14088 -static atomic_t mce_paniced;
14089 +static atomic_unchecked_t mce_paniced;
14090
14091 static int fake_panic;
14092 -static atomic_t mce_fake_paniced;
14093 +static atomic_unchecked_t mce_fake_paniced;
14094
14095 /* Panic in progress. Enable interrupts and wait for final IPI */
14096 static void wait_for_panic(void)
14097 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14098 /*
14099 * Make sure only one CPU runs in machine check panic
14100 */
14101 - if (atomic_inc_return(&mce_paniced) > 1)
14102 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14103 wait_for_panic();
14104 barrier();
14105
14106 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14107 console_verbose();
14108 } else {
14109 /* Don't log too much for fake panic */
14110 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14111 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14112 return;
14113 }
14114 print_mce_head();
14115 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14116 * might have been modified by someone else.
14117 */
14118 rmb();
14119 - if (atomic_read(&mce_paniced))
14120 + if (atomic_read_unchecked(&mce_paniced))
14121 wait_for_panic();
14122 if (!monarch_timeout)
14123 goto out;
14124 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14125 }
14126
14127 /* Call the installed machine check handler for this CPU setup. */
14128 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14129 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14130 unexpected_machine_check;
14131
14132 /*
14133 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14134 return;
14135 }
14136
14137 + pax_open_kernel();
14138 machine_check_vector = do_machine_check;
14139 + pax_close_kernel();
14140
14141 mce_init();
14142 mce_cpu_features(c);
14143 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14144 */
14145
14146 static DEFINE_SPINLOCK(mce_state_lock);
14147 -static int open_count; /* #times opened */
14148 +static local_t open_count; /* #times opened */
14149 static int open_exclu; /* already open exclusive? */
14150
14151 static int mce_open(struct inode *inode, struct file *file)
14152 {
14153 spin_lock(&mce_state_lock);
14154
14155 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14156 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14157 spin_unlock(&mce_state_lock);
14158
14159 return -EBUSY;
14160 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14161
14162 if (file->f_flags & O_EXCL)
14163 open_exclu = 1;
14164 - open_count++;
14165 + local_inc(&open_count);
14166
14167 spin_unlock(&mce_state_lock);
14168
14169 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14170 {
14171 spin_lock(&mce_state_lock);
14172
14173 - open_count--;
14174 + local_dec(&open_count);
14175 open_exclu = 0;
14176
14177 spin_unlock(&mce_state_lock);
14178 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14179 static void mce_reset(void)
14180 {
14181 cpu_missing = 0;
14182 - atomic_set(&mce_fake_paniced, 0);
14183 + atomic_set_unchecked(&mce_fake_paniced, 0);
14184 atomic_set(&mce_executing, 0);
14185 atomic_set(&mce_callin, 0);
14186 atomic_set(&global_nwo, 0);
14187 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14188 index ef3cd31..9d2f6ab 100644
14189 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14190 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14191 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14192 return ret;
14193 }
14194
14195 -static struct sysfs_ops threshold_ops = {
14196 +static const struct sysfs_ops threshold_ops = {
14197 .show = show,
14198 .store = store,
14199 };
14200 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14201 index 5c0e653..0882b0a 100644
14202 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14203 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14204 @@ -12,6 +12,7 @@
14205 #include <asm/system.h>
14206 #include <asm/mce.h>
14207 #include <asm/msr.h>
14208 +#include <asm/pgtable.h>
14209
14210 /* By default disabled */
14211 int mce_p5_enabled __read_mostly;
14212 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14213 if (!cpu_has(c, X86_FEATURE_MCE))
14214 return;
14215
14216 + pax_open_kernel();
14217 machine_check_vector = pentium_machine_check;
14218 + pax_close_kernel();
14219 /* Make sure the vector pointer is visible before we enable MCEs: */
14220 wmb();
14221
14222 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14223 index 54060f5..c1a7577 100644
14224 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14225 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14226 @@ -11,6 +11,7 @@
14227 #include <asm/system.h>
14228 #include <asm/mce.h>
14229 #include <asm/msr.h>
14230 +#include <asm/pgtable.h>
14231
14232 /* Machine check handler for WinChip C6: */
14233 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14234 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14235 {
14236 u32 lo, hi;
14237
14238 + pax_open_kernel();
14239 machine_check_vector = winchip_machine_check;
14240 + pax_close_kernel();
14241 /* Make sure the vector pointer is visible before we enable MCEs: */
14242 wmb();
14243
14244 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14245 index 33af141..92ba9cd 100644
14246 --- a/arch/x86/kernel/cpu/mtrr/amd.c
14247 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
14248 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14249 return 0;
14250 }
14251
14252 -static struct mtrr_ops amd_mtrr_ops = {
14253 +static const struct mtrr_ops amd_mtrr_ops = {
14254 .vendor = X86_VENDOR_AMD,
14255 .set = amd_set_mtrr,
14256 .get = amd_get_mtrr,
14257 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14258 index de89f14..316fe3e 100644
14259 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
14260 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14261 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14262 return 0;
14263 }
14264
14265 -static struct mtrr_ops centaur_mtrr_ops = {
14266 +static const struct mtrr_ops centaur_mtrr_ops = {
14267 .vendor = X86_VENDOR_CENTAUR,
14268 .set = centaur_set_mcr,
14269 .get = centaur_get_mcr,
14270 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14271 index 228d982..68a3343 100644
14272 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14273 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14274 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14275 post_set();
14276 }
14277
14278 -static struct mtrr_ops cyrix_mtrr_ops = {
14279 +static const struct mtrr_ops cyrix_mtrr_ops = {
14280 .vendor = X86_VENDOR_CYRIX,
14281 .set_all = cyrix_set_all,
14282 .set = cyrix_set_arr,
14283 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14284 index 55da0c5..4d75584 100644
14285 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14286 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14287 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14288 /*
14289 * Generic structure...
14290 */
14291 -struct mtrr_ops generic_mtrr_ops = {
14292 +const struct mtrr_ops generic_mtrr_ops = {
14293 .use_intel_if = 1,
14294 .set_all = generic_set_all,
14295 .get = generic_get_mtrr,
14296 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14297 index fd60f09..c94ef52 100644
14298 --- a/arch/x86/kernel/cpu/mtrr/main.c
14299 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14300 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14301 u64 size_or_mask, size_and_mask;
14302 static bool mtrr_aps_delayed_init;
14303
14304 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14305 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14306
14307 -struct mtrr_ops *mtrr_if;
14308 +const struct mtrr_ops *mtrr_if;
14309
14310 static void set_mtrr(unsigned int reg, unsigned long base,
14311 unsigned long size, mtrr_type type);
14312
14313 -void set_mtrr_ops(struct mtrr_ops *ops)
14314 +void set_mtrr_ops(const struct mtrr_ops *ops)
14315 {
14316 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14317 mtrr_ops[ops->vendor] = ops;
14318 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14319 index a501dee..816c719 100644
14320 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14321 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14322 @@ -25,14 +25,14 @@ struct mtrr_ops {
14323 int (*validate_add_page)(unsigned long base, unsigned long size,
14324 unsigned int type);
14325 int (*have_wrcomb)(void);
14326 -};
14327 +} __do_const;
14328
14329 extern int generic_get_free_region(unsigned long base, unsigned long size,
14330 int replace_reg);
14331 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14332 unsigned int type);
14333
14334 -extern struct mtrr_ops generic_mtrr_ops;
14335 +extern const struct mtrr_ops generic_mtrr_ops;
14336
14337 extern int positive_have_wrcomb(void);
14338
14339 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14340 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14341 void get_mtrr_state(void);
14342
14343 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14344 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14345
14346 extern u64 size_or_mask, size_and_mask;
14347 -extern struct mtrr_ops *mtrr_if;
14348 +extern const struct mtrr_ops *mtrr_if;
14349
14350 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14351 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14352 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14353 index 0ff02ca..fc49a60 100644
14354 --- a/arch/x86/kernel/cpu/perf_event.c
14355 +++ b/arch/x86/kernel/cpu/perf_event.c
14356 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14357 * count to the generic event atomically:
14358 */
14359 again:
14360 - prev_raw_count = atomic64_read(&hwc->prev_count);
14361 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14362 rdmsrl(hwc->event_base + idx, new_raw_count);
14363
14364 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14365 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14366 new_raw_count) != prev_raw_count)
14367 goto again;
14368
14369 @@ -741,7 +741,7 @@ again:
14370 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14371 delta >>= shift;
14372
14373 - atomic64_add(delta, &event->count);
14374 + atomic64_add_unchecked(delta, &event->count);
14375 atomic64_sub(delta, &hwc->period_left);
14376
14377 return new_raw_count;
14378 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14379 * The hw event starts counting from this event offset,
14380 * mark it to be able to extra future deltas:
14381 */
14382 - atomic64_set(&hwc->prev_count, (u64)-left);
14383 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14384
14385 err = checking_wrmsrl(hwc->event_base + idx,
14386 (u64)(-left) & x86_pmu.event_mask);
14387 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14388 break;
14389
14390 callchain_store(entry, frame.return_address);
14391 - fp = frame.next_frame;
14392 + fp = (__force const void __user *)frame.next_frame;
14393 }
14394 }
14395
14396 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14397 index 898df97..9e82503 100644
14398 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14399 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14400 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14401
14402 /* Interface defining a CPU specific perfctr watchdog */
14403 struct wd_ops {
14404 - int (*reserve)(void);
14405 - void (*unreserve)(void);
14406 - int (*setup)(unsigned nmi_hz);
14407 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14408 - void (*stop)(void);
14409 + int (* const reserve)(void);
14410 + void (* const unreserve)(void);
14411 + int (* const setup)(unsigned nmi_hz);
14412 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14413 + void (* const stop)(void);
14414 unsigned perfctr;
14415 unsigned evntsel;
14416 u64 checkbit;
14417 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14418 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14419 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14420
14421 +/* cannot be const */
14422 static struct wd_ops intel_arch_wd_ops;
14423
14424 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14425 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14426 return 1;
14427 }
14428
14429 +/* cannot be const */
14430 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14431 .reserve = single_msr_reserve,
14432 .unreserve = single_msr_unreserve,
14433 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14434 index ff95824..2ffdcb5 100644
14435 --- a/arch/x86/kernel/crash.c
14436 +++ b/arch/x86/kernel/crash.c
14437 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14438 regs = args->regs;
14439
14440 #ifdef CONFIG_X86_32
14441 - if (!user_mode_vm(regs)) {
14442 + if (!user_mode(regs)) {
14443 crash_fixup_ss_esp(&fixed_regs, regs);
14444 regs = &fixed_regs;
14445 }
14446 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14447 index 37250fe..bf2ec74 100644
14448 --- a/arch/x86/kernel/doublefault_32.c
14449 +++ b/arch/x86/kernel/doublefault_32.c
14450 @@ -11,7 +11,7 @@
14451
14452 #define DOUBLEFAULT_STACKSIZE (1024)
14453 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14454 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14455 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14456
14457 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14458
14459 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14460 unsigned long gdt, tss;
14461
14462 store_gdt(&gdt_desc);
14463 - gdt = gdt_desc.address;
14464 + gdt = (unsigned long)gdt_desc.address;
14465
14466 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14467
14468 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14469 /* 0x2 bit is always set */
14470 .flags = X86_EFLAGS_SF | 0x2,
14471 .sp = STACK_START,
14472 - .es = __USER_DS,
14473 + .es = __KERNEL_DS,
14474 .cs = __KERNEL_CS,
14475 .ss = __KERNEL_DS,
14476 - .ds = __USER_DS,
14477 + .ds = __KERNEL_DS,
14478 .fs = __KERNEL_PERCPU,
14479
14480 .__cr3 = __pa_nodebug(swapper_pg_dir),
14481 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14482 index 2d8a371..4fa6ae6 100644
14483 --- a/arch/x86/kernel/dumpstack.c
14484 +++ b/arch/x86/kernel/dumpstack.c
14485 @@ -2,6 +2,9 @@
14486 * Copyright (C) 1991, 1992 Linus Torvalds
14487 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14488 */
14489 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14490 +#define __INCLUDED_BY_HIDESYM 1
14491 +#endif
14492 #include <linux/kallsyms.h>
14493 #include <linux/kprobes.h>
14494 #include <linux/uaccess.h>
14495 @@ -28,7 +31,7 @@ static int die_counter;
14496
14497 void printk_address(unsigned long address, int reliable)
14498 {
14499 - printk(" [<%p>] %s%pS\n", (void *) address,
14500 + printk(" [<%p>] %s%pA\n", (void *) address,
14501 reliable ? "" : "? ", (void *) address);
14502 }
14503
14504 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14505 static void
14506 print_ftrace_graph_addr(unsigned long addr, void *data,
14507 const struct stacktrace_ops *ops,
14508 - struct thread_info *tinfo, int *graph)
14509 + struct task_struct *task, int *graph)
14510 {
14511 - struct task_struct *task = tinfo->task;
14512 unsigned long ret_addr;
14513 int index = task->curr_ret_stack;
14514
14515 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14516 static inline void
14517 print_ftrace_graph_addr(unsigned long addr, void *data,
14518 const struct stacktrace_ops *ops,
14519 - struct thread_info *tinfo, int *graph)
14520 + struct task_struct *task, int *graph)
14521 { }
14522 #endif
14523
14524 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14525 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14526 */
14527
14528 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14529 - void *p, unsigned int size, void *end)
14530 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14531 {
14532 - void *t = tinfo;
14533 if (end) {
14534 if (p < end && p >= (end-THREAD_SIZE))
14535 return 1;
14536 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14537 }
14538
14539 unsigned long
14540 -print_context_stack(struct thread_info *tinfo,
14541 +print_context_stack(struct task_struct *task, void *stack_start,
14542 unsigned long *stack, unsigned long bp,
14543 const struct stacktrace_ops *ops, void *data,
14544 unsigned long *end, int *graph)
14545 {
14546 struct stack_frame *frame = (struct stack_frame *)bp;
14547
14548 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14549 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14550 unsigned long addr;
14551
14552 addr = *stack;
14553 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14554 } else {
14555 ops->address(data, addr, 0);
14556 }
14557 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14558 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14559 }
14560 stack++;
14561 }
14562 @@ -180,7 +180,7 @@ void dump_stack(void)
14563 #endif
14564
14565 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14566 - current->pid, current->comm, print_tainted(),
14567 + task_pid_nr(current), current->comm, print_tainted(),
14568 init_utsname()->release,
14569 (int)strcspn(init_utsname()->version, " "),
14570 init_utsname()->version);
14571 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14572 return flags;
14573 }
14574
14575 +extern void gr_handle_kernel_exploit(void);
14576 +
14577 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14578 {
14579 if (regs && kexec_should_crash(current))
14580 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14581 panic("Fatal exception in interrupt");
14582 if (panic_on_oops)
14583 panic("Fatal exception");
14584 - do_exit(signr);
14585 +
14586 + gr_handle_kernel_exploit();
14587 +
14588 + do_group_exit(signr);
14589 }
14590
14591 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14592 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14593 unsigned long flags = oops_begin();
14594 int sig = SIGSEGV;
14595
14596 - if (!user_mode_vm(regs))
14597 + if (!user_mode(regs))
14598 report_bug(regs->ip, regs);
14599
14600 if (__die(str, regs, err))
14601 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14602 index 81086c2..13e8b17 100644
14603 --- a/arch/x86/kernel/dumpstack.h
14604 +++ b/arch/x86/kernel/dumpstack.h
14605 @@ -15,7 +15,7 @@
14606 #endif
14607
14608 extern unsigned long
14609 -print_context_stack(struct thread_info *tinfo,
14610 +print_context_stack(struct task_struct *task, void *stack_start,
14611 unsigned long *stack, unsigned long bp,
14612 const struct stacktrace_ops *ops, void *data,
14613 unsigned long *end, int *graph);
14614 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14615 index f7dd2a7..504f53b 100644
14616 --- a/arch/x86/kernel/dumpstack_32.c
14617 +++ b/arch/x86/kernel/dumpstack_32.c
14618 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14619 #endif
14620
14621 for (;;) {
14622 - struct thread_info *context;
14623 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14624 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14625
14626 - context = (struct thread_info *)
14627 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14628 - bp = print_context_stack(context, stack, bp, ops,
14629 - data, NULL, &graph);
14630 -
14631 - stack = (unsigned long *)context->previous_esp;
14632 - if (!stack)
14633 + if (stack_start == task_stack_page(task))
14634 break;
14635 + stack = *(unsigned long **)stack_start;
14636 if (ops->stack(data, "IRQ") < 0)
14637 break;
14638 touch_nmi_watchdog();
14639 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14640 * When in-kernel, we also print out the stack and code at the
14641 * time of the fault..
14642 */
14643 - if (!user_mode_vm(regs)) {
14644 + if (!user_mode(regs)) {
14645 unsigned int code_prologue = code_bytes * 43 / 64;
14646 unsigned int code_len = code_bytes;
14647 unsigned char c;
14648 u8 *ip;
14649 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14650
14651 printk(KERN_EMERG "Stack:\n");
14652 show_stack_log_lvl(NULL, regs, &regs->sp,
14653 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14654
14655 printk(KERN_EMERG "Code: ");
14656
14657 - ip = (u8 *)regs->ip - code_prologue;
14658 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14659 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14660 /* try starting at IP */
14661 - ip = (u8 *)regs->ip;
14662 + ip = (u8 *)regs->ip + cs_base;
14663 code_len = code_len - code_prologue + 1;
14664 }
14665 for (i = 0; i < code_len; i++, ip++) {
14666 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14667 printk(" Bad EIP value.");
14668 break;
14669 }
14670 - if (ip == (u8 *)regs->ip)
14671 + if (ip == (u8 *)regs->ip + cs_base)
14672 printk("<%02x> ", c);
14673 else
14674 printk("%02x ", c);
14675 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14676 printk("\n");
14677 }
14678
14679 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14680 +void pax_check_alloca(unsigned long size)
14681 +{
14682 + unsigned long sp = (unsigned long)&sp, stack_left;
14683 +
14684 + /* all kernel stacks are of the same size */
14685 + stack_left = sp & (THREAD_SIZE - 1);
14686 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14687 +}
14688 +EXPORT_SYMBOL(pax_check_alloca);
14689 +#endif
14690 +
14691 int is_valid_bugaddr(unsigned long ip)
14692 {
14693 unsigned short ud2;
14694
14695 + ip = ktla_ktva(ip);
14696 if (ip < PAGE_OFFSET)
14697 return 0;
14698 if (probe_kernel_address((unsigned short *)ip, ud2))
14699 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14700 index a071e6b..36cd585 100644
14701 --- a/arch/x86/kernel/dumpstack_64.c
14702 +++ b/arch/x86/kernel/dumpstack_64.c
14703 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14704 unsigned long *irq_stack_end =
14705 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14706 unsigned used = 0;
14707 - struct thread_info *tinfo;
14708 int graph = 0;
14709 + void *stack_start;
14710
14711 if (!task)
14712 task = current;
14713 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14714 * current stack address. If the stacks consist of nested
14715 * exceptions
14716 */
14717 - tinfo = task_thread_info(task);
14718 for (;;) {
14719 char *id;
14720 unsigned long *estack_end;
14721 +
14722 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14723 &used, &id);
14724
14725 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14726 if (ops->stack(data, id) < 0)
14727 break;
14728
14729 - bp = print_context_stack(tinfo, stack, bp, ops,
14730 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14731 data, estack_end, &graph);
14732 ops->stack(data, "<EOE>");
14733 /*
14734 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14735 if (stack >= irq_stack && stack < irq_stack_end) {
14736 if (ops->stack(data, "IRQ") < 0)
14737 break;
14738 - bp = print_context_stack(tinfo, stack, bp,
14739 + bp = print_context_stack(task, irq_stack, stack, bp,
14740 ops, data, irq_stack_end, &graph);
14741 /*
14742 * We link to the next stack (which would be
14743 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14744 /*
14745 * This handles the process stack:
14746 */
14747 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14748 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14749 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14750 put_cpu();
14751 }
14752 EXPORT_SYMBOL(dump_trace);
14753 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14754 return ud2 == 0x0b0f;
14755 }
14756
14757 +
14758 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14759 +void pax_check_alloca(unsigned long size)
14760 +{
14761 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14762 + unsigned cpu, used;
14763 + char *id;
14764 +
14765 + /* check the process stack first */
14766 + stack_start = (unsigned long)task_stack_page(current);
14767 + stack_end = stack_start + THREAD_SIZE;
14768 + if (likely(stack_start <= sp && sp < stack_end)) {
14769 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14770 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14771 + return;
14772 + }
14773 +
14774 + cpu = get_cpu();
14775 +
14776 + /* check the irq stacks */
14777 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14778 + stack_start = stack_end - IRQ_STACK_SIZE;
14779 + if (stack_start <= sp && sp < stack_end) {
14780 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14781 + put_cpu();
14782 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14783 + return;
14784 + }
14785 +
14786 + /* check the exception stacks */
14787 + used = 0;
14788 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14789 + stack_start = stack_end - EXCEPTION_STKSZ;
14790 + if (stack_end && stack_start <= sp && sp < stack_end) {
14791 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14792 + put_cpu();
14793 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14794 + return;
14795 + }
14796 +
14797 + put_cpu();
14798 +
14799 + /* unknown stack */
14800 + BUG();
14801 +}
14802 +EXPORT_SYMBOL(pax_check_alloca);
14803 +#endif
14804 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14805 index a89739a..95e0c48 100644
14806 --- a/arch/x86/kernel/e820.c
14807 +++ b/arch/x86/kernel/e820.c
14808 @@ -733,7 +733,7 @@ struct early_res {
14809 };
14810 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14811 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14812 - {}
14813 + { 0, 0, {0}, 0 }
14814 };
14815
14816 static int __init find_overlapped_early(u64 start, u64 end)
14817 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14818 index b9c830c..1e41a96 100644
14819 --- a/arch/x86/kernel/early_printk.c
14820 +++ b/arch/x86/kernel/early_printk.c
14821 @@ -7,6 +7,7 @@
14822 #include <linux/pci_regs.h>
14823 #include <linux/pci_ids.h>
14824 #include <linux/errno.h>
14825 +#include <linux/sched.h>
14826 #include <asm/io.h>
14827 #include <asm/processor.h>
14828 #include <asm/fcntl.h>
14829 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14830 int n;
14831 va_list ap;
14832
14833 + pax_track_stack();
14834 +
14835 va_start(ap, fmt);
14836 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14837 early_console->write(early_console, buf, n);
14838 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14839 index 5cab48e..b025f9b 100644
14840 --- a/arch/x86/kernel/efi_32.c
14841 +++ b/arch/x86/kernel/efi_32.c
14842 @@ -38,70 +38,56 @@
14843 */
14844
14845 static unsigned long efi_rt_eflags;
14846 -static pgd_t efi_bak_pg_dir_pointer[2];
14847 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14848
14849 -void efi_call_phys_prelog(void)
14850 +void __init efi_call_phys_prelog(void)
14851 {
14852 - unsigned long cr4;
14853 - unsigned long temp;
14854 struct desc_ptr gdt_descr;
14855
14856 +#ifdef CONFIG_PAX_KERNEXEC
14857 + struct desc_struct d;
14858 +#endif
14859 +
14860 local_irq_save(efi_rt_eflags);
14861
14862 - /*
14863 - * If I don't have PAE, I should just duplicate two entries in page
14864 - * directory. If I have PAE, I just need to duplicate one entry in
14865 - * page directory.
14866 - */
14867 - cr4 = read_cr4_safe();
14868 -
14869 - if (cr4 & X86_CR4_PAE) {
14870 - efi_bak_pg_dir_pointer[0].pgd =
14871 - swapper_pg_dir[pgd_index(0)].pgd;
14872 - swapper_pg_dir[0].pgd =
14873 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14874 - } else {
14875 - efi_bak_pg_dir_pointer[0].pgd =
14876 - swapper_pg_dir[pgd_index(0)].pgd;
14877 - efi_bak_pg_dir_pointer[1].pgd =
14878 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14879 - swapper_pg_dir[pgd_index(0)].pgd =
14880 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14881 - temp = PAGE_OFFSET + 0x400000;
14882 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14883 - swapper_pg_dir[pgd_index(temp)].pgd;
14884 - }
14885 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14886 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14887 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14888
14889 /*
14890 * After the lock is released, the original page table is restored.
14891 */
14892 __flush_tlb_all();
14893
14894 +#ifdef CONFIG_PAX_KERNEXEC
14895 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14896 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14897 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14898 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14899 +#endif
14900 +
14901 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14902 gdt_descr.size = GDT_SIZE - 1;
14903 load_gdt(&gdt_descr);
14904 }
14905
14906 -void efi_call_phys_epilog(void)
14907 +void __init efi_call_phys_epilog(void)
14908 {
14909 - unsigned long cr4;
14910 struct desc_ptr gdt_descr;
14911
14912 +#ifdef CONFIG_PAX_KERNEXEC
14913 + struct desc_struct d;
14914 +
14915 + memset(&d, 0, sizeof d);
14916 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14917 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14918 +#endif
14919 +
14920 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14921 gdt_descr.size = GDT_SIZE - 1;
14922 load_gdt(&gdt_descr);
14923
14924 - cr4 = read_cr4_safe();
14925 -
14926 - if (cr4 & X86_CR4_PAE) {
14927 - swapper_pg_dir[pgd_index(0)].pgd =
14928 - efi_bak_pg_dir_pointer[0].pgd;
14929 - } else {
14930 - swapper_pg_dir[pgd_index(0)].pgd =
14931 - efi_bak_pg_dir_pointer[0].pgd;
14932 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14933 - efi_bak_pg_dir_pointer[1].pgd;
14934 - }
14935 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14936
14937 /*
14938 * After the lock is released, the original page table is restored.
14939 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14940 index fbe66e6..c5c0dd2 100644
14941 --- a/arch/x86/kernel/efi_stub_32.S
14942 +++ b/arch/x86/kernel/efi_stub_32.S
14943 @@ -6,7 +6,9 @@
14944 */
14945
14946 #include <linux/linkage.h>
14947 +#include <linux/init.h>
14948 #include <asm/page_types.h>
14949 +#include <asm/segment.h>
14950
14951 /*
14952 * efi_call_phys(void *, ...) is a function with variable parameters.
14953 @@ -20,7 +22,7 @@
14954 * service functions will comply with gcc calling convention, too.
14955 */
14956
14957 -.text
14958 +__INIT
14959 ENTRY(efi_call_phys)
14960 /*
14961 * 0. The function can only be called in Linux kernel. So CS has been
14962 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14963 * The mapping of lower virtual memory has been created in prelog and
14964 * epilog.
14965 */
14966 - movl $1f, %edx
14967 - subl $__PAGE_OFFSET, %edx
14968 - jmp *%edx
14969 + movl $(__KERNEXEC_EFI_DS), %edx
14970 + mov %edx, %ds
14971 + mov %edx, %es
14972 + mov %edx, %ss
14973 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14974 1:
14975
14976 /*
14977 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14978 * parameter 2, ..., param n. To make things easy, we save the return
14979 * address of efi_call_phys in a global variable.
14980 */
14981 - popl %edx
14982 - movl %edx, saved_return_addr
14983 - /* get the function pointer into ECX*/
14984 - popl %ecx
14985 - movl %ecx, efi_rt_function_ptr
14986 - movl $2f, %edx
14987 - subl $__PAGE_OFFSET, %edx
14988 - pushl %edx
14989 + popl (saved_return_addr)
14990 + popl (efi_rt_function_ptr)
14991
14992 /*
14993 * 3. Clear PG bit in %CR0.
14994 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14995 /*
14996 * 5. Call the physical function.
14997 */
14998 - jmp *%ecx
14999 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
15000
15001 -2:
15002 /*
15003 * 6. After EFI runtime service returns, control will return to
15004 * following instruction. We'd better readjust stack pointer first.
15005 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
15006 movl %cr0, %edx
15007 orl $0x80000000, %edx
15008 movl %edx, %cr0
15009 - jmp 1f
15010 -1:
15011 +
15012 /*
15013 * 8. Now restore the virtual mode from flat mode by
15014 * adding EIP with PAGE_OFFSET.
15015 */
15016 - movl $1f, %edx
15017 - jmp *%edx
15018 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
15019 1:
15020 + movl $(__KERNEL_DS), %edx
15021 + mov %edx, %ds
15022 + mov %edx, %es
15023 + mov %edx, %ss
15024
15025 /*
15026 * 9. Balance the stack. And because EAX contain the return value,
15027 * we'd better not clobber it.
15028 */
15029 - leal efi_rt_function_ptr, %edx
15030 - movl (%edx), %ecx
15031 - pushl %ecx
15032 + pushl (efi_rt_function_ptr)
15033
15034 /*
15035 - * 10. Push the saved return address onto the stack and return.
15036 + * 10. Return to the saved return address.
15037 */
15038 - leal saved_return_addr, %edx
15039 - movl (%edx), %ecx
15040 - pushl %ecx
15041 - ret
15042 + jmpl *(saved_return_addr)
15043 ENDPROC(efi_call_phys)
15044 .previous
15045
15046 -.data
15047 +__INITDATA
15048 saved_return_addr:
15049 .long 0
15050 efi_rt_function_ptr:
15051 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15052 index 4c07cca..2c8427d 100644
15053 --- a/arch/x86/kernel/efi_stub_64.S
15054 +++ b/arch/x86/kernel/efi_stub_64.S
15055 @@ -7,6 +7,7 @@
15056 */
15057
15058 #include <linux/linkage.h>
15059 +#include <asm/alternative-asm.h>
15060
15061 #define SAVE_XMM \
15062 mov %rsp, %rax; \
15063 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
15064 call *%rdi
15065 addq $32, %rsp
15066 RESTORE_XMM
15067 + pax_force_retaddr 0, 1
15068 ret
15069 ENDPROC(efi_call0)
15070
15071 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
15072 call *%rdi
15073 addq $32, %rsp
15074 RESTORE_XMM
15075 + pax_force_retaddr 0, 1
15076 ret
15077 ENDPROC(efi_call1)
15078
15079 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
15080 call *%rdi
15081 addq $32, %rsp
15082 RESTORE_XMM
15083 + pax_force_retaddr 0, 1
15084 ret
15085 ENDPROC(efi_call2)
15086
15087 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
15088 call *%rdi
15089 addq $32, %rsp
15090 RESTORE_XMM
15091 + pax_force_retaddr 0, 1
15092 ret
15093 ENDPROC(efi_call3)
15094
15095 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
15096 call *%rdi
15097 addq $32, %rsp
15098 RESTORE_XMM
15099 + pax_force_retaddr 0, 1
15100 ret
15101 ENDPROC(efi_call4)
15102
15103 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
15104 call *%rdi
15105 addq $48, %rsp
15106 RESTORE_XMM
15107 + pax_force_retaddr 0, 1
15108 ret
15109 ENDPROC(efi_call5)
15110
15111 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
15112 call *%rdi
15113 addq $48, %rsp
15114 RESTORE_XMM
15115 + pax_force_retaddr 0, 1
15116 ret
15117 ENDPROC(efi_call6)
15118 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15119 index c097e7d..c689cf4 100644
15120 --- a/arch/x86/kernel/entry_32.S
15121 +++ b/arch/x86/kernel/entry_32.S
15122 @@ -185,13 +185,146 @@
15123 /*CFI_REL_OFFSET gs, PT_GS*/
15124 .endm
15125 .macro SET_KERNEL_GS reg
15126 +
15127 +#ifdef CONFIG_CC_STACKPROTECTOR
15128 movl $(__KERNEL_STACK_CANARY), \reg
15129 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15130 + movl $(__USER_DS), \reg
15131 +#else
15132 + xorl \reg, \reg
15133 +#endif
15134 +
15135 movl \reg, %gs
15136 .endm
15137
15138 #endif /* CONFIG_X86_32_LAZY_GS */
15139
15140 -.macro SAVE_ALL
15141 +.macro pax_enter_kernel
15142 +#ifdef CONFIG_PAX_KERNEXEC
15143 + call pax_enter_kernel
15144 +#endif
15145 +.endm
15146 +
15147 +.macro pax_exit_kernel
15148 +#ifdef CONFIG_PAX_KERNEXEC
15149 + call pax_exit_kernel
15150 +#endif
15151 +.endm
15152 +
15153 +#ifdef CONFIG_PAX_KERNEXEC
15154 +ENTRY(pax_enter_kernel)
15155 +#ifdef CONFIG_PARAVIRT
15156 + pushl %eax
15157 + pushl %ecx
15158 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15159 + mov %eax, %esi
15160 +#else
15161 + mov %cr0, %esi
15162 +#endif
15163 + bts $16, %esi
15164 + jnc 1f
15165 + mov %cs, %esi
15166 + cmp $__KERNEL_CS, %esi
15167 + jz 3f
15168 + ljmp $__KERNEL_CS, $3f
15169 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15170 +2:
15171 +#ifdef CONFIG_PARAVIRT
15172 + mov %esi, %eax
15173 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15174 +#else
15175 + mov %esi, %cr0
15176 +#endif
15177 +3:
15178 +#ifdef CONFIG_PARAVIRT
15179 + popl %ecx
15180 + popl %eax
15181 +#endif
15182 + ret
15183 +ENDPROC(pax_enter_kernel)
15184 +
15185 +ENTRY(pax_exit_kernel)
15186 +#ifdef CONFIG_PARAVIRT
15187 + pushl %eax
15188 + pushl %ecx
15189 +#endif
15190 + mov %cs, %esi
15191 + cmp $__KERNEXEC_KERNEL_CS, %esi
15192 + jnz 2f
15193 +#ifdef CONFIG_PARAVIRT
15194 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15195 + mov %eax, %esi
15196 +#else
15197 + mov %cr0, %esi
15198 +#endif
15199 + btr $16, %esi
15200 + ljmp $__KERNEL_CS, $1f
15201 +1:
15202 +#ifdef CONFIG_PARAVIRT
15203 + mov %esi, %eax
15204 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15205 +#else
15206 + mov %esi, %cr0
15207 +#endif
15208 +2:
15209 +#ifdef CONFIG_PARAVIRT
15210 + popl %ecx
15211 + popl %eax
15212 +#endif
15213 + ret
15214 +ENDPROC(pax_exit_kernel)
15215 +#endif
15216 +
15217 +.macro pax_erase_kstack
15218 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15219 + call pax_erase_kstack
15220 +#endif
15221 +.endm
15222 +
15223 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15224 +/*
15225 + * ebp: thread_info
15226 + * ecx, edx: can be clobbered
15227 + */
15228 +ENTRY(pax_erase_kstack)
15229 + pushl %edi
15230 + pushl %eax
15231 +
15232 + mov TI_lowest_stack(%ebp), %edi
15233 + mov $-0xBEEF, %eax
15234 + std
15235 +
15236 +1: mov %edi, %ecx
15237 + and $THREAD_SIZE_asm - 1, %ecx
15238 + shr $2, %ecx
15239 + repne scasl
15240 + jecxz 2f
15241 +
15242 + cmp $2*16, %ecx
15243 + jc 2f
15244 +
15245 + mov $2*16, %ecx
15246 + repe scasl
15247 + jecxz 2f
15248 + jne 1b
15249 +
15250 +2: cld
15251 + mov %esp, %ecx
15252 + sub %edi, %ecx
15253 + shr $2, %ecx
15254 + rep stosl
15255 +
15256 + mov TI_task_thread_sp0(%ebp), %edi
15257 + sub $128, %edi
15258 + mov %edi, TI_lowest_stack(%ebp)
15259 +
15260 + popl %eax
15261 + popl %edi
15262 + ret
15263 +ENDPROC(pax_erase_kstack)
15264 +#endif
15265 +
15266 +.macro __SAVE_ALL _DS
15267 cld
15268 PUSH_GS
15269 pushl %fs
15270 @@ -224,7 +357,7 @@
15271 pushl %ebx
15272 CFI_ADJUST_CFA_OFFSET 4
15273 CFI_REL_OFFSET ebx, 0
15274 - movl $(__USER_DS), %edx
15275 + movl $\_DS, %edx
15276 movl %edx, %ds
15277 movl %edx, %es
15278 movl $(__KERNEL_PERCPU), %edx
15279 @@ -232,6 +365,15 @@
15280 SET_KERNEL_GS %edx
15281 .endm
15282
15283 +.macro SAVE_ALL
15284 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15285 + __SAVE_ALL __KERNEL_DS
15286 + pax_enter_kernel
15287 +#else
15288 + __SAVE_ALL __USER_DS
15289 +#endif
15290 +.endm
15291 +
15292 .macro RESTORE_INT_REGS
15293 popl %ebx
15294 CFI_ADJUST_CFA_OFFSET -4
15295 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15296 CFI_ADJUST_CFA_OFFSET -4
15297 jmp syscall_exit
15298 CFI_ENDPROC
15299 -END(ret_from_fork)
15300 +ENDPROC(ret_from_fork)
15301
15302 /*
15303 * Return to user mode is not as complex as all this looks,
15304 @@ -352,7 +494,15 @@ check_userspace:
15305 movb PT_CS(%esp), %al
15306 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15307 cmpl $USER_RPL, %eax
15308 +
15309 +#ifdef CONFIG_PAX_KERNEXEC
15310 + jae resume_userspace
15311 +
15312 + PAX_EXIT_KERNEL
15313 + jmp resume_kernel
15314 +#else
15315 jb resume_kernel # not returning to v8086 or userspace
15316 +#endif
15317
15318 ENTRY(resume_userspace)
15319 LOCKDEP_SYS_EXIT
15320 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15321 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15322 # int/exception return?
15323 jne work_pending
15324 - jmp restore_all
15325 -END(ret_from_exception)
15326 + jmp restore_all_pax
15327 +ENDPROC(ret_from_exception)
15328
15329 #ifdef CONFIG_PREEMPT
15330 ENTRY(resume_kernel)
15331 @@ -380,7 +530,7 @@ need_resched:
15332 jz restore_all
15333 call preempt_schedule_irq
15334 jmp need_resched
15335 -END(resume_kernel)
15336 +ENDPROC(resume_kernel)
15337 #endif
15338 CFI_ENDPROC
15339
15340 @@ -414,25 +564,36 @@ sysenter_past_esp:
15341 /*CFI_REL_OFFSET cs, 0*/
15342 /*
15343 * Push current_thread_info()->sysenter_return to the stack.
15344 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15345 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15346 */
15347 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15348 + pushl $0
15349 CFI_ADJUST_CFA_OFFSET 4
15350 CFI_REL_OFFSET eip, 0
15351
15352 pushl %eax
15353 CFI_ADJUST_CFA_OFFSET 4
15354 SAVE_ALL
15355 + GET_THREAD_INFO(%ebp)
15356 + movl TI_sysenter_return(%ebp),%ebp
15357 + movl %ebp,PT_EIP(%esp)
15358 ENABLE_INTERRUPTS(CLBR_NONE)
15359
15360 /*
15361 * Load the potential sixth argument from user stack.
15362 * Careful about security.
15363 */
15364 + movl PT_OLDESP(%esp),%ebp
15365 +
15366 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15367 + mov PT_OLDSS(%esp),%ds
15368 +1: movl %ds:(%ebp),%ebp
15369 + push %ss
15370 + pop %ds
15371 +#else
15372 cmpl $__PAGE_OFFSET-3,%ebp
15373 jae syscall_fault
15374 1: movl (%ebp),%ebp
15375 +#endif
15376 +
15377 movl %ebp,PT_EBP(%esp)
15378 .section __ex_table,"a"
15379 .align 4
15380 @@ -455,12 +616,24 @@ sysenter_do_call:
15381 testl $_TIF_ALLWORK_MASK, %ecx
15382 jne sysexit_audit
15383 sysenter_exit:
15384 +
15385 +#ifdef CONFIG_PAX_RANDKSTACK
15386 + pushl_cfi %eax
15387 + movl %esp, %eax
15388 + call pax_randomize_kstack
15389 + popl_cfi %eax
15390 +#endif
15391 +
15392 + pax_erase_kstack
15393 +
15394 /* if something modifies registers it must also disable sysexit */
15395 movl PT_EIP(%esp), %edx
15396 movl PT_OLDESP(%esp), %ecx
15397 xorl %ebp,%ebp
15398 TRACE_IRQS_ON
15399 1: mov PT_FS(%esp), %fs
15400 +2: mov PT_DS(%esp), %ds
15401 +3: mov PT_ES(%esp), %es
15402 PTGS_TO_GS
15403 ENABLE_INTERRUPTS_SYSEXIT
15404
15405 @@ -477,6 +650,9 @@ sysenter_audit:
15406 movl %eax,%edx /* 2nd arg: syscall number */
15407 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15408 call audit_syscall_entry
15409 +
15410 + pax_erase_kstack
15411 +
15412 pushl %ebx
15413 CFI_ADJUST_CFA_OFFSET 4
15414 movl PT_EAX(%esp),%eax /* reload syscall number */
15415 @@ -504,11 +680,17 @@ sysexit_audit:
15416
15417 CFI_ENDPROC
15418 .pushsection .fixup,"ax"
15419 -2: movl $0,PT_FS(%esp)
15420 +4: movl $0,PT_FS(%esp)
15421 + jmp 1b
15422 +5: movl $0,PT_DS(%esp)
15423 + jmp 1b
15424 +6: movl $0,PT_ES(%esp)
15425 jmp 1b
15426 .section __ex_table,"a"
15427 .align 4
15428 - .long 1b,2b
15429 + .long 1b,4b
15430 + .long 2b,5b
15431 + .long 3b,6b
15432 .popsection
15433 PTGS_TO_GS_EX
15434 ENDPROC(ia32_sysenter_target)
15435 @@ -538,6 +720,15 @@ syscall_exit:
15436 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15437 jne syscall_exit_work
15438
15439 +restore_all_pax:
15440 +
15441 +#ifdef CONFIG_PAX_RANDKSTACK
15442 + movl %esp, %eax
15443 + call pax_randomize_kstack
15444 +#endif
15445 +
15446 + pax_erase_kstack
15447 +
15448 restore_all:
15449 TRACE_IRQS_IRET
15450 restore_all_notrace:
15451 @@ -602,10 +793,29 @@ ldt_ss:
15452 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15453 mov %dx, %ax /* eax: new kernel esp */
15454 sub %eax, %edx /* offset (low word is 0) */
15455 - PER_CPU(gdt_page, %ebx)
15456 +#ifdef CONFIG_SMP
15457 + movl PER_CPU_VAR(cpu_number), %ebx
15458 + shll $PAGE_SHIFT_asm, %ebx
15459 + addl $cpu_gdt_table, %ebx
15460 +#else
15461 + movl $cpu_gdt_table, %ebx
15462 +#endif
15463 shr $16, %edx
15464 +
15465 +#ifdef CONFIG_PAX_KERNEXEC
15466 + mov %cr0, %esi
15467 + btr $16, %esi
15468 + mov %esi, %cr0
15469 +#endif
15470 +
15471 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15472 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15473 +
15474 +#ifdef CONFIG_PAX_KERNEXEC
15475 + bts $16, %esi
15476 + mov %esi, %cr0
15477 +#endif
15478 +
15479 pushl $__ESPFIX_SS
15480 CFI_ADJUST_CFA_OFFSET 4
15481 push %eax /* new kernel esp */
15482 @@ -636,36 +846,30 @@ work_resched:
15483 movl TI_flags(%ebp), %ecx
15484 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15485 # than syscall tracing?
15486 - jz restore_all
15487 + jz restore_all_pax
15488 testb $_TIF_NEED_RESCHED, %cl
15489 jnz work_resched
15490
15491 work_notifysig: # deal with pending signals and
15492 # notify-resume requests
15493 + movl %esp, %eax
15494 #ifdef CONFIG_VM86
15495 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15496 - movl %esp, %eax
15497 - jne work_notifysig_v86 # returning to kernel-space or
15498 + jz 1f # returning to kernel-space or
15499 # vm86-space
15500 - xorl %edx, %edx
15501 - call do_notify_resume
15502 - jmp resume_userspace_sig
15503
15504 - ALIGN
15505 -work_notifysig_v86:
15506 pushl %ecx # save ti_flags for do_notify_resume
15507 CFI_ADJUST_CFA_OFFSET 4
15508 call save_v86_state # %eax contains pt_regs pointer
15509 popl %ecx
15510 CFI_ADJUST_CFA_OFFSET -4
15511 movl %eax, %esp
15512 -#else
15513 - movl %esp, %eax
15514 +1:
15515 #endif
15516 xorl %edx, %edx
15517 call do_notify_resume
15518 jmp resume_userspace_sig
15519 -END(work_pending)
15520 +ENDPROC(work_pending)
15521
15522 # perform syscall exit tracing
15523 ALIGN
15524 @@ -673,11 +877,14 @@ syscall_trace_entry:
15525 movl $-ENOSYS,PT_EAX(%esp)
15526 movl %esp, %eax
15527 call syscall_trace_enter
15528 +
15529 + pax_erase_kstack
15530 +
15531 /* What it returned is what we'll actually use. */
15532 cmpl $(nr_syscalls), %eax
15533 jnae syscall_call
15534 jmp syscall_exit
15535 -END(syscall_trace_entry)
15536 +ENDPROC(syscall_trace_entry)
15537
15538 # perform syscall exit tracing
15539 ALIGN
15540 @@ -690,20 +897,24 @@ syscall_exit_work:
15541 movl %esp, %eax
15542 call syscall_trace_leave
15543 jmp resume_userspace
15544 -END(syscall_exit_work)
15545 +ENDPROC(syscall_exit_work)
15546 CFI_ENDPROC
15547
15548 RING0_INT_FRAME # can't unwind into user space anyway
15549 syscall_fault:
15550 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15551 + push %ss
15552 + pop %ds
15553 +#endif
15554 GET_THREAD_INFO(%ebp)
15555 movl $-EFAULT,PT_EAX(%esp)
15556 jmp resume_userspace
15557 -END(syscall_fault)
15558 +ENDPROC(syscall_fault)
15559
15560 syscall_badsys:
15561 movl $-ENOSYS,PT_EAX(%esp)
15562 jmp resume_userspace
15563 -END(syscall_badsys)
15564 +ENDPROC(syscall_badsys)
15565 CFI_ENDPROC
15566
15567 /*
15568 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15569 PTREGSCALL(vm86)
15570 PTREGSCALL(vm86old)
15571
15572 + ALIGN;
15573 +ENTRY(kernel_execve)
15574 + push %ebp
15575 + sub $PT_OLDSS+4,%esp
15576 + push %edi
15577 + push %ecx
15578 + push %eax
15579 + lea 3*4(%esp),%edi
15580 + mov $PT_OLDSS/4+1,%ecx
15581 + xorl %eax,%eax
15582 + rep stosl
15583 + pop %eax
15584 + pop %ecx
15585 + pop %edi
15586 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15587 + mov %eax,PT_EBX(%esp)
15588 + mov %edx,PT_ECX(%esp)
15589 + mov %ecx,PT_EDX(%esp)
15590 + mov %esp,%eax
15591 + call sys_execve
15592 + GET_THREAD_INFO(%ebp)
15593 + test %eax,%eax
15594 + jz syscall_exit
15595 + add $PT_OLDSS+4,%esp
15596 + pop %ebp
15597 + ret
15598 +
15599 .macro FIXUP_ESPFIX_STACK
15600 /*
15601 * Switch back for ESPFIX stack to the normal zerobased stack
15602 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15603 * normal stack and adjusts ESP with the matching offset.
15604 */
15605 /* fixup the stack */
15606 - PER_CPU(gdt_page, %ebx)
15607 +#ifdef CONFIG_SMP
15608 + movl PER_CPU_VAR(cpu_number), %ebx
15609 + shll $PAGE_SHIFT_asm, %ebx
15610 + addl $cpu_gdt_table, %ebx
15611 +#else
15612 + movl $cpu_gdt_table, %ebx
15613 +#endif
15614 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15615 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15616 shl $16, %eax
15617 @@ -793,7 +1037,7 @@ vector=vector+1
15618 .endr
15619 2: jmp common_interrupt
15620 .endr
15621 -END(irq_entries_start)
15622 +ENDPROC(irq_entries_start)
15623
15624 .previous
15625 END(interrupt)
15626 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15627 CFI_ADJUST_CFA_OFFSET 4
15628 jmp error_code
15629 CFI_ENDPROC
15630 -END(coprocessor_error)
15631 +ENDPROC(coprocessor_error)
15632
15633 ENTRY(simd_coprocessor_error)
15634 RING0_INT_FRAME
15635 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15636 CFI_ADJUST_CFA_OFFSET 4
15637 jmp error_code
15638 CFI_ENDPROC
15639 -END(simd_coprocessor_error)
15640 +ENDPROC(simd_coprocessor_error)
15641
15642 ENTRY(device_not_available)
15643 RING0_INT_FRAME
15644 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15645 CFI_ADJUST_CFA_OFFSET 4
15646 jmp error_code
15647 CFI_ENDPROC
15648 -END(device_not_available)
15649 +ENDPROC(device_not_available)
15650
15651 #ifdef CONFIG_PARAVIRT
15652 ENTRY(native_iret)
15653 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15654 .align 4
15655 .long native_iret, iret_exc
15656 .previous
15657 -END(native_iret)
15658 +ENDPROC(native_iret)
15659
15660 ENTRY(native_irq_enable_sysexit)
15661 sti
15662 sysexit
15663 -END(native_irq_enable_sysexit)
15664 +ENDPROC(native_irq_enable_sysexit)
15665 #endif
15666
15667 ENTRY(overflow)
15668 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15669 CFI_ADJUST_CFA_OFFSET 4
15670 jmp error_code
15671 CFI_ENDPROC
15672 -END(overflow)
15673 +ENDPROC(overflow)
15674
15675 ENTRY(bounds)
15676 RING0_INT_FRAME
15677 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15678 CFI_ADJUST_CFA_OFFSET 4
15679 jmp error_code
15680 CFI_ENDPROC
15681 -END(bounds)
15682 +ENDPROC(bounds)
15683
15684 ENTRY(invalid_op)
15685 RING0_INT_FRAME
15686 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15687 CFI_ADJUST_CFA_OFFSET 4
15688 jmp error_code
15689 CFI_ENDPROC
15690 -END(invalid_op)
15691 +ENDPROC(invalid_op)
15692
15693 ENTRY(coprocessor_segment_overrun)
15694 RING0_INT_FRAME
15695 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15696 CFI_ADJUST_CFA_OFFSET 4
15697 jmp error_code
15698 CFI_ENDPROC
15699 -END(coprocessor_segment_overrun)
15700 +ENDPROC(coprocessor_segment_overrun)
15701
15702 ENTRY(invalid_TSS)
15703 RING0_EC_FRAME
15704 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15705 CFI_ADJUST_CFA_OFFSET 4
15706 jmp error_code
15707 CFI_ENDPROC
15708 -END(invalid_TSS)
15709 +ENDPROC(invalid_TSS)
15710
15711 ENTRY(segment_not_present)
15712 RING0_EC_FRAME
15713 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15714 CFI_ADJUST_CFA_OFFSET 4
15715 jmp error_code
15716 CFI_ENDPROC
15717 -END(segment_not_present)
15718 +ENDPROC(segment_not_present)
15719
15720 ENTRY(stack_segment)
15721 RING0_EC_FRAME
15722 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15723 CFI_ADJUST_CFA_OFFSET 4
15724 jmp error_code
15725 CFI_ENDPROC
15726 -END(stack_segment)
15727 +ENDPROC(stack_segment)
15728
15729 ENTRY(alignment_check)
15730 RING0_EC_FRAME
15731 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15732 CFI_ADJUST_CFA_OFFSET 4
15733 jmp error_code
15734 CFI_ENDPROC
15735 -END(alignment_check)
15736 +ENDPROC(alignment_check)
15737
15738 ENTRY(divide_error)
15739 RING0_INT_FRAME
15740 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15741 CFI_ADJUST_CFA_OFFSET 4
15742 jmp error_code
15743 CFI_ENDPROC
15744 -END(divide_error)
15745 +ENDPROC(divide_error)
15746
15747 #ifdef CONFIG_X86_MCE
15748 ENTRY(machine_check)
15749 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15750 CFI_ADJUST_CFA_OFFSET 4
15751 jmp error_code
15752 CFI_ENDPROC
15753 -END(machine_check)
15754 +ENDPROC(machine_check)
15755 #endif
15756
15757 ENTRY(spurious_interrupt_bug)
15758 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15759 CFI_ADJUST_CFA_OFFSET 4
15760 jmp error_code
15761 CFI_ENDPROC
15762 -END(spurious_interrupt_bug)
15763 +ENDPROC(spurious_interrupt_bug)
15764
15765 ENTRY(kernel_thread_helper)
15766 pushl $0 # fake return address for unwinder
15767 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15768
15769 ENTRY(mcount)
15770 ret
15771 -END(mcount)
15772 +ENDPROC(mcount)
15773
15774 ENTRY(ftrace_caller)
15775 cmpl $0, function_trace_stop
15776 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15777 .globl ftrace_stub
15778 ftrace_stub:
15779 ret
15780 -END(ftrace_caller)
15781 +ENDPROC(ftrace_caller)
15782
15783 #else /* ! CONFIG_DYNAMIC_FTRACE */
15784
15785 @@ -1160,7 +1404,7 @@ trace:
15786 popl %ecx
15787 popl %eax
15788 jmp ftrace_stub
15789 -END(mcount)
15790 +ENDPROC(mcount)
15791 #endif /* CONFIG_DYNAMIC_FTRACE */
15792 #endif /* CONFIG_FUNCTION_TRACER */
15793
15794 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15795 popl %ecx
15796 popl %eax
15797 ret
15798 -END(ftrace_graph_caller)
15799 +ENDPROC(ftrace_graph_caller)
15800
15801 .globl return_to_handler
15802 return_to_handler:
15803 @@ -1198,7 +1442,6 @@ return_to_handler:
15804 ret
15805 #endif
15806
15807 -.section .rodata,"a"
15808 #include "syscall_table_32.S"
15809
15810 syscall_table_size=(.-sys_call_table)
15811 @@ -1255,15 +1498,18 @@ error_code:
15812 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15813 REG_TO_PTGS %ecx
15814 SET_KERNEL_GS %ecx
15815 - movl $(__USER_DS), %ecx
15816 + movl $(__KERNEL_DS), %ecx
15817 movl %ecx, %ds
15818 movl %ecx, %es
15819 +
15820 + pax_enter_kernel
15821 +
15822 TRACE_IRQS_OFF
15823 movl %esp,%eax # pt_regs pointer
15824 call *%edi
15825 jmp ret_from_exception
15826 CFI_ENDPROC
15827 -END(page_fault)
15828 +ENDPROC(page_fault)
15829
15830 /*
15831 * Debug traps and NMI can happen at the one SYSENTER instruction
15832 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15833 call do_debug
15834 jmp ret_from_exception
15835 CFI_ENDPROC
15836 -END(debug)
15837 +ENDPROC(debug)
15838
15839 /*
15840 * NMI is doubly nasty. It can happen _while_ we're handling
15841 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15842 xorl %edx,%edx # zero error code
15843 movl %esp,%eax # pt_regs pointer
15844 call do_nmi
15845 +
15846 + pax_exit_kernel
15847 +
15848 jmp restore_all_notrace
15849 CFI_ENDPROC
15850
15851 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15852 FIXUP_ESPFIX_STACK # %eax == %esp
15853 xorl %edx,%edx # zero error code
15854 call do_nmi
15855 +
15856 + pax_exit_kernel
15857 +
15858 RESTORE_REGS
15859 lss 12+4(%esp), %esp # back to espfix stack
15860 CFI_ADJUST_CFA_OFFSET -24
15861 jmp irq_return
15862 CFI_ENDPROC
15863 -END(nmi)
15864 +ENDPROC(nmi)
15865
15866 ENTRY(int3)
15867 RING0_INT_FRAME
15868 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15869 call do_int3
15870 jmp ret_from_exception
15871 CFI_ENDPROC
15872 -END(int3)
15873 +ENDPROC(int3)
15874
15875 ENTRY(general_protection)
15876 RING0_EC_FRAME
15877 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15878 CFI_ADJUST_CFA_OFFSET 4
15879 jmp error_code
15880 CFI_ENDPROC
15881 -END(general_protection)
15882 +ENDPROC(general_protection)
15883
15884 /*
15885 * End of kprobes section
15886 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15887 index 34a56a9..87790b4 100644
15888 --- a/arch/x86/kernel/entry_64.S
15889 +++ b/arch/x86/kernel/entry_64.S
15890 @@ -53,6 +53,8 @@
15891 #include <asm/paravirt.h>
15892 #include <asm/ftrace.h>
15893 #include <asm/percpu.h>
15894 +#include <asm/pgtable.h>
15895 +#include <asm/alternative-asm.h>
15896
15897 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15898 #include <linux/elf-em.h>
15899 @@ -64,8 +66,9 @@
15900 #ifdef CONFIG_FUNCTION_TRACER
15901 #ifdef CONFIG_DYNAMIC_FTRACE
15902 ENTRY(mcount)
15903 + pax_force_retaddr
15904 retq
15905 -END(mcount)
15906 +ENDPROC(mcount)
15907
15908 ENTRY(ftrace_caller)
15909 cmpl $0, function_trace_stop
15910 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15911 #endif
15912
15913 GLOBAL(ftrace_stub)
15914 + pax_force_retaddr
15915 retq
15916 -END(ftrace_caller)
15917 +ENDPROC(ftrace_caller)
15918
15919 #else /* ! CONFIG_DYNAMIC_FTRACE */
15920 ENTRY(mcount)
15921 @@ -108,6 +112,7 @@ ENTRY(mcount)
15922 #endif
15923
15924 GLOBAL(ftrace_stub)
15925 + pax_force_retaddr
15926 retq
15927
15928 trace:
15929 @@ -117,12 +122,13 @@ trace:
15930 movq 8(%rbp), %rsi
15931 subq $MCOUNT_INSN_SIZE, %rdi
15932
15933 + pax_force_fptr ftrace_trace_function
15934 call *ftrace_trace_function
15935
15936 MCOUNT_RESTORE_FRAME
15937
15938 jmp ftrace_stub
15939 -END(mcount)
15940 +ENDPROC(mcount)
15941 #endif /* CONFIG_DYNAMIC_FTRACE */
15942 #endif /* CONFIG_FUNCTION_TRACER */
15943
15944 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15945
15946 MCOUNT_RESTORE_FRAME
15947
15948 + pax_force_retaddr
15949 retq
15950 -END(ftrace_graph_caller)
15951 +ENDPROC(ftrace_graph_caller)
15952
15953 GLOBAL(return_to_handler)
15954 subq $24, %rsp
15955 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15956 movq 8(%rsp), %rdx
15957 movq (%rsp), %rax
15958 addq $16, %rsp
15959 + pax_force_retaddr
15960 retq
15961 #endif
15962
15963 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15964 ENDPROC(native_usergs_sysret64)
15965 #endif /* CONFIG_PARAVIRT */
15966
15967 + .macro ljmpq sel, off
15968 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15969 + .byte 0x48; ljmp *1234f(%rip)
15970 + .pushsection .rodata
15971 + .align 16
15972 + 1234: .quad \off; .word \sel
15973 + .popsection
15974 +#else
15975 + pushq $\sel
15976 + pushq $\off
15977 + lretq
15978 +#endif
15979 + .endm
15980 +
15981 + .macro pax_enter_kernel
15982 + pax_set_fptr_mask
15983 +#ifdef CONFIG_PAX_KERNEXEC
15984 + call pax_enter_kernel
15985 +#endif
15986 + .endm
15987 +
15988 + .macro pax_exit_kernel
15989 +#ifdef CONFIG_PAX_KERNEXEC
15990 + call pax_exit_kernel
15991 +#endif
15992 + .endm
15993 +
15994 +#ifdef CONFIG_PAX_KERNEXEC
15995 +ENTRY(pax_enter_kernel)
15996 + pushq %rdi
15997 +
15998 +#ifdef CONFIG_PARAVIRT
15999 + PV_SAVE_REGS(CLBR_RDI)
16000 +#endif
16001 +
16002 + GET_CR0_INTO_RDI
16003 + bts $16,%rdi
16004 + jnc 3f
16005 + mov %cs,%edi
16006 + cmp $__KERNEL_CS,%edi
16007 + jnz 2f
16008 +1:
16009 +
16010 +#ifdef CONFIG_PARAVIRT
16011 + PV_RESTORE_REGS(CLBR_RDI)
16012 +#endif
16013 +
16014 + popq %rdi
16015 + pax_force_retaddr
16016 + retq
16017 +
16018 +2: ljmpq __KERNEL_CS,1f
16019 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
16020 +4: SET_RDI_INTO_CR0
16021 + jmp 1b
16022 +ENDPROC(pax_enter_kernel)
16023 +
16024 +ENTRY(pax_exit_kernel)
16025 + pushq %rdi
16026 +
16027 +#ifdef CONFIG_PARAVIRT
16028 + PV_SAVE_REGS(CLBR_RDI)
16029 +#endif
16030 +
16031 + mov %cs,%rdi
16032 + cmp $__KERNEXEC_KERNEL_CS,%edi
16033 + jz 2f
16034 +1:
16035 +
16036 +#ifdef CONFIG_PARAVIRT
16037 + PV_RESTORE_REGS(CLBR_RDI);
16038 +#endif
16039 +
16040 + popq %rdi
16041 + pax_force_retaddr
16042 + retq
16043 +
16044 +2: GET_CR0_INTO_RDI
16045 + btr $16,%rdi
16046 + ljmpq __KERNEL_CS,3f
16047 +3: SET_RDI_INTO_CR0
16048 + jmp 1b
16049 +#ifdef CONFIG_PARAVIRT
16050 + PV_RESTORE_REGS(CLBR_RDI);
16051 +#endif
16052 +
16053 + popq %rdi
16054 + pax_force_retaddr
16055 + retq
16056 +ENDPROC(pax_exit_kernel)
16057 +#endif
16058 +
16059 + .macro pax_enter_kernel_user
16060 + pax_set_fptr_mask
16061 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16062 + call pax_enter_kernel_user
16063 +#endif
16064 + .endm
16065 +
16066 + .macro pax_exit_kernel_user
16067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16068 + call pax_exit_kernel_user
16069 +#endif
16070 +#ifdef CONFIG_PAX_RANDKSTACK
16071 + pushq %rax
16072 + call pax_randomize_kstack
16073 + popq %rax
16074 +#endif
16075 + .endm
16076 +
16077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16078 +ENTRY(pax_enter_kernel_user)
16079 + pushq %rdi
16080 + pushq %rbx
16081 +
16082 +#ifdef CONFIG_PARAVIRT
16083 + PV_SAVE_REGS(CLBR_RDI)
16084 +#endif
16085 +
16086 + GET_CR3_INTO_RDI
16087 + mov %rdi,%rbx
16088 + add $__START_KERNEL_map,%rbx
16089 + sub phys_base(%rip),%rbx
16090 +
16091 +#ifdef CONFIG_PARAVIRT
16092 + pushq %rdi
16093 + cmpl $0, pv_info+PARAVIRT_enabled
16094 + jz 1f
16095 + i = 0
16096 + .rept USER_PGD_PTRS
16097 + mov i*8(%rbx),%rsi
16098 + mov $0,%sil
16099 + lea i*8(%rbx),%rdi
16100 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16101 + i = i + 1
16102 + .endr
16103 + jmp 2f
16104 +1:
16105 +#endif
16106 +
16107 + i = 0
16108 + .rept USER_PGD_PTRS
16109 + movb $0,i*8(%rbx)
16110 + i = i + 1
16111 + .endr
16112 +
16113 +#ifdef CONFIG_PARAVIRT
16114 +2: popq %rdi
16115 +#endif
16116 + SET_RDI_INTO_CR3
16117 +
16118 +#ifdef CONFIG_PAX_KERNEXEC
16119 + GET_CR0_INTO_RDI
16120 + bts $16,%rdi
16121 + SET_RDI_INTO_CR0
16122 +#endif
16123 +
16124 +#ifdef CONFIG_PARAVIRT
16125 + PV_RESTORE_REGS(CLBR_RDI)
16126 +#endif
16127 +
16128 + popq %rbx
16129 + popq %rdi
16130 + pax_force_retaddr
16131 + retq
16132 +ENDPROC(pax_enter_kernel_user)
16133 +
16134 +ENTRY(pax_exit_kernel_user)
16135 + push %rdi
16136 +
16137 +#ifdef CONFIG_PARAVIRT
16138 + pushq %rbx
16139 + PV_SAVE_REGS(CLBR_RDI)
16140 +#endif
16141 +
16142 +#ifdef CONFIG_PAX_KERNEXEC
16143 + GET_CR0_INTO_RDI
16144 + btr $16,%rdi
16145 + SET_RDI_INTO_CR0
16146 +#endif
16147 +
16148 + GET_CR3_INTO_RDI
16149 + add $__START_KERNEL_map,%rdi
16150 + sub phys_base(%rip),%rdi
16151 +
16152 +#ifdef CONFIG_PARAVIRT
16153 + cmpl $0, pv_info+PARAVIRT_enabled
16154 + jz 1f
16155 + mov %rdi,%rbx
16156 + i = 0
16157 + .rept USER_PGD_PTRS
16158 + mov i*8(%rbx),%rsi
16159 + mov $0x67,%sil
16160 + lea i*8(%rbx),%rdi
16161 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16162 + i = i + 1
16163 + .endr
16164 + jmp 2f
16165 +1:
16166 +#endif
16167 +
16168 + i = 0
16169 + .rept USER_PGD_PTRS
16170 + movb $0x67,i*8(%rdi)
16171 + i = i + 1
16172 + .endr
16173 +
16174 +#ifdef CONFIG_PARAVIRT
16175 +2: PV_RESTORE_REGS(CLBR_RDI)
16176 + popq %rbx
16177 +#endif
16178 +
16179 + popq %rdi
16180 + pax_force_retaddr
16181 + retq
16182 +ENDPROC(pax_exit_kernel_user)
16183 +#endif
16184 +
16185 +.macro pax_erase_kstack
16186 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16187 + call pax_erase_kstack
16188 +#endif
16189 +.endm
16190 +
16191 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16192 +/*
16193 + * r11: thread_info
16194 + * rcx, rdx: can be clobbered
16195 + */
16196 +ENTRY(pax_erase_kstack)
16197 + pushq %rdi
16198 + pushq %rax
16199 + pushq %r11
16200 +
16201 + GET_THREAD_INFO(%r11)
16202 + mov TI_lowest_stack(%r11), %rdi
16203 + mov $-0xBEEF, %rax
16204 + std
16205 +
16206 +1: mov %edi, %ecx
16207 + and $THREAD_SIZE_asm - 1, %ecx
16208 + shr $3, %ecx
16209 + repne scasq
16210 + jecxz 2f
16211 +
16212 + cmp $2*8, %ecx
16213 + jc 2f
16214 +
16215 + mov $2*8, %ecx
16216 + repe scasq
16217 + jecxz 2f
16218 + jne 1b
16219 +
16220 +2: cld
16221 + mov %esp, %ecx
16222 + sub %edi, %ecx
16223 +
16224 + cmp $THREAD_SIZE_asm, %rcx
16225 + jb 3f
16226 + ud2
16227 +3:
16228 +
16229 + shr $3, %ecx
16230 + rep stosq
16231 +
16232 + mov TI_task_thread_sp0(%r11), %rdi
16233 + sub $256, %rdi
16234 + mov %rdi, TI_lowest_stack(%r11)
16235 +
16236 + popq %r11
16237 + popq %rax
16238 + popq %rdi
16239 + pax_force_retaddr
16240 + ret
16241 +ENDPROC(pax_erase_kstack)
16242 +#endif
16243
16244 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16245 #ifdef CONFIG_TRACE_IRQFLAGS
16246 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16247 .endm
16248
16249 .macro UNFAKE_STACK_FRAME
16250 - addq $8*6, %rsp
16251 - CFI_ADJUST_CFA_OFFSET -(6*8)
16252 + addq $8*6 + ARG_SKIP, %rsp
16253 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16254 .endm
16255
16256 /*
16257 @@ -317,7 +601,7 @@ ENTRY(save_args)
16258 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16259 movq_cfi rbp, 8 /* push %rbp */
16260 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16261 - testl $3, CS(%rdi)
16262 + testb $3, CS(%rdi)
16263 je 1f
16264 SWAPGS
16265 /*
16266 @@ -337,9 +621,10 @@ ENTRY(save_args)
16267 * We entered an interrupt context - irqs are off:
16268 */
16269 2: TRACE_IRQS_OFF
16270 + pax_force_retaddr
16271 ret
16272 CFI_ENDPROC
16273 -END(save_args)
16274 +ENDPROC(save_args)
16275
16276 ENTRY(save_rest)
16277 PARTIAL_FRAME 1 REST_SKIP+8
16278 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16279 movq_cfi r15, R15+16
16280 movq %r11, 8(%rsp) /* return address */
16281 FIXUP_TOP_OF_STACK %r11, 16
16282 + pax_force_retaddr
16283 ret
16284 CFI_ENDPROC
16285 -END(save_rest)
16286 +ENDPROC(save_rest)
16287
16288 /* save complete stack frame */
16289 .pushsection .kprobes.text, "ax"
16290 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16291 js 1f /* negative -> in kernel */
16292 SWAPGS
16293 xorl %ebx,%ebx
16294 -1: ret
16295 +1: pax_force_retaddr_bts
16296 + ret
16297 CFI_ENDPROC
16298 -END(save_paranoid)
16299 +ENDPROC(save_paranoid)
16300 .popsection
16301
16302 /*
16303 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16304
16305 RESTORE_REST
16306
16307 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16308 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16309 je int_ret_from_sys_call
16310
16311 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16312 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16313 jmp ret_from_sys_call # go to the SYSRET fastpath
16314
16315 CFI_ENDPROC
16316 -END(ret_from_fork)
16317 +ENDPROC(ret_from_fork)
16318
16319 /*
16320 * System call entry. Upto 6 arguments in registers are supported.
16321 @@ -455,7 +742,7 @@ END(ret_from_fork)
16322 ENTRY(system_call)
16323 CFI_STARTPROC simple
16324 CFI_SIGNAL_FRAME
16325 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16326 + CFI_DEF_CFA rsp,0
16327 CFI_REGISTER rip,rcx
16328 /*CFI_REGISTER rflags,r11*/
16329 SWAPGS_UNSAFE_STACK
16330 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16331
16332 movq %rsp,PER_CPU_VAR(old_rsp)
16333 movq PER_CPU_VAR(kernel_stack),%rsp
16334 + SAVE_ARGS 8*6,1
16335 + pax_enter_kernel_user
16336 /*
16337 * No need to follow this irqs off/on section - it's straight
16338 * and short:
16339 */
16340 ENABLE_INTERRUPTS(CLBR_NONE)
16341 - SAVE_ARGS 8,1
16342 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16343 movq %rcx,RIP-ARGOFFSET(%rsp)
16344 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16345 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16346 system_call_fastpath:
16347 cmpq $__NR_syscall_max,%rax
16348 ja badsys
16349 - movq %r10,%rcx
16350 + movq R10-ARGOFFSET(%rsp),%rcx
16351 call *sys_call_table(,%rax,8) # XXX: rip relative
16352 movq %rax,RAX-ARGOFFSET(%rsp)
16353 /*
16354 @@ -502,6 +790,8 @@ sysret_check:
16355 andl %edi,%edx
16356 jnz sysret_careful
16357 CFI_REMEMBER_STATE
16358 + pax_exit_kernel_user
16359 + pax_erase_kstack
16360 /*
16361 * sysretq will re-enable interrupts:
16362 */
16363 @@ -555,14 +845,18 @@ badsys:
16364 * jump back to the normal fast path.
16365 */
16366 auditsys:
16367 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16368 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16369 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16370 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16371 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16372 movq %rax,%rsi /* 2nd arg: syscall number */
16373 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16374 call audit_syscall_entry
16375 +
16376 + pax_erase_kstack
16377 +
16378 LOAD_ARGS 0 /* reload call-clobbered registers */
16379 + pax_set_fptr_mask
16380 jmp system_call_fastpath
16381
16382 /*
16383 @@ -592,16 +886,20 @@ tracesys:
16384 FIXUP_TOP_OF_STACK %rdi
16385 movq %rsp,%rdi
16386 call syscall_trace_enter
16387 +
16388 + pax_erase_kstack
16389 +
16390 /*
16391 * Reload arg registers from stack in case ptrace changed them.
16392 * We don't reload %rax because syscall_trace_enter() returned
16393 * the value it wants us to use in the table lookup.
16394 */
16395 LOAD_ARGS ARGOFFSET, 1
16396 + pax_set_fptr_mask
16397 RESTORE_REST
16398 cmpq $__NR_syscall_max,%rax
16399 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16400 - movq %r10,%rcx /* fixup for C */
16401 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16402 call *sys_call_table(,%rax,8)
16403 movq %rax,RAX-ARGOFFSET(%rsp)
16404 /* Use IRET because user could have changed frame */
16405 @@ -613,7 +911,7 @@ tracesys:
16406 GLOBAL(int_ret_from_sys_call)
16407 DISABLE_INTERRUPTS(CLBR_NONE)
16408 TRACE_IRQS_OFF
16409 - testl $3,CS-ARGOFFSET(%rsp)
16410 + testb $3,CS-ARGOFFSET(%rsp)
16411 je retint_restore_args
16412 movl $_TIF_ALLWORK_MASK,%edi
16413 /* edi: mask to check */
16414 @@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16415 andl %edi,%edx
16416 jnz int_careful
16417 andl $~TS_COMPAT,TI_status(%rcx)
16418 + pax_erase_kstack
16419 jmp retint_swapgs
16420
16421 /* Either reschedule or signal or syscall exit tracking needed. */
16422 @@ -674,7 +973,7 @@ int_restore_rest:
16423 TRACE_IRQS_OFF
16424 jmp int_with_check
16425 CFI_ENDPROC
16426 -END(system_call)
16427 +ENDPROC(system_call)
16428
16429 /*
16430 * Certain special system calls that need to save a complete full stack frame.
16431 @@ -690,7 +989,7 @@ ENTRY(\label)
16432 call \func
16433 jmp ptregscall_common
16434 CFI_ENDPROC
16435 -END(\label)
16436 +ENDPROC(\label)
16437 .endm
16438
16439 PTREGSCALL stub_clone, sys_clone, %r8
16440 @@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16441 movq_cfi_restore R12+8, r12
16442 movq_cfi_restore RBP+8, rbp
16443 movq_cfi_restore RBX+8, rbx
16444 + pax_force_retaddr
16445 ret $REST_SKIP /* pop extended registers */
16446 CFI_ENDPROC
16447 -END(ptregscall_common)
16448 +ENDPROC(ptregscall_common)
16449
16450 ENTRY(stub_execve)
16451 CFI_STARTPROC
16452 @@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16453 RESTORE_REST
16454 jmp int_ret_from_sys_call
16455 CFI_ENDPROC
16456 -END(stub_execve)
16457 +ENDPROC(stub_execve)
16458
16459 /*
16460 * sigreturn is special because it needs to restore all registers on return.
16461 @@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16462 RESTORE_REST
16463 jmp int_ret_from_sys_call
16464 CFI_ENDPROC
16465 -END(stub_rt_sigreturn)
16466 +ENDPROC(stub_rt_sigreturn)
16467
16468 /*
16469 * Build the entry stubs and pointer table with some assembler magic.
16470 @@ -780,7 +1080,7 @@ vector=vector+1
16471 2: jmp common_interrupt
16472 .endr
16473 CFI_ENDPROC
16474 -END(irq_entries_start)
16475 +ENDPROC(irq_entries_start)
16476
16477 .previous
16478 END(interrupt)
16479 @@ -800,6 +1100,16 @@ END(interrupt)
16480 CFI_ADJUST_CFA_OFFSET 10*8
16481 call save_args
16482 PARTIAL_FRAME 0
16483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16484 + testb $3, CS(%rdi)
16485 + jnz 1f
16486 + pax_enter_kernel
16487 + jmp 2f
16488 +1: pax_enter_kernel_user
16489 +2:
16490 +#else
16491 + pax_enter_kernel
16492 +#endif
16493 call \func
16494 .endm
16495
16496 @@ -822,7 +1132,7 @@ ret_from_intr:
16497 CFI_ADJUST_CFA_OFFSET -8
16498 exit_intr:
16499 GET_THREAD_INFO(%rcx)
16500 - testl $3,CS-ARGOFFSET(%rsp)
16501 + testb $3,CS-ARGOFFSET(%rsp)
16502 je retint_kernel
16503
16504 /* Interrupt came from user space */
16505 @@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16506 * The iretq could re-enable interrupts:
16507 */
16508 DISABLE_INTERRUPTS(CLBR_ANY)
16509 + pax_exit_kernel_user
16510 TRACE_IRQS_IRETQ
16511 SWAPGS
16512 jmp restore_args
16513
16514 retint_restore_args: /* return to kernel space */
16515 DISABLE_INTERRUPTS(CLBR_ANY)
16516 + pax_exit_kernel
16517 + pax_force_retaddr RIP-ARGOFFSET
16518 /*
16519 * The iretq could re-enable interrupts:
16520 */
16521 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16522 #endif
16523
16524 CFI_ENDPROC
16525 -END(common_interrupt)
16526 +ENDPROC(common_interrupt)
16527
16528 /*
16529 * APIC interrupts.
16530 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16531 interrupt \do_sym
16532 jmp ret_from_intr
16533 CFI_ENDPROC
16534 -END(\sym)
16535 +ENDPROC(\sym)
16536 .endm
16537
16538 #ifdef CONFIG_SMP
16539 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16540 CFI_ADJUST_CFA_OFFSET 15*8
16541 call error_entry
16542 DEFAULT_FRAME 0
16543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16544 + testb $3, CS(%rsp)
16545 + jnz 1f
16546 + pax_enter_kernel
16547 + jmp 2f
16548 +1: pax_enter_kernel_user
16549 +2:
16550 +#else
16551 + pax_enter_kernel
16552 +#endif
16553 movq %rsp,%rdi /* pt_regs pointer */
16554 xorl %esi,%esi /* no error code */
16555 call \do_sym
16556 jmp error_exit /* %ebx: no swapgs flag */
16557 CFI_ENDPROC
16558 -END(\sym)
16559 +ENDPROC(\sym)
16560 .endm
16561
16562 .macro paranoidzeroentry sym do_sym
16563 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16564 subq $15*8, %rsp
16565 call save_paranoid
16566 TRACE_IRQS_OFF
16567 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16568 + testb $3, CS(%rsp)
16569 + jnz 1f
16570 + pax_enter_kernel
16571 + jmp 2f
16572 +1: pax_enter_kernel_user
16573 +2:
16574 +#else
16575 + pax_enter_kernel
16576 +#endif
16577 movq %rsp,%rdi /* pt_regs pointer */
16578 xorl %esi,%esi /* no error code */
16579 call \do_sym
16580 jmp paranoid_exit /* %ebx: no swapgs flag */
16581 CFI_ENDPROC
16582 -END(\sym)
16583 +ENDPROC(\sym)
16584 .endm
16585
16586 .macro paranoidzeroentry_ist sym do_sym ist
16587 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16588 subq $15*8, %rsp
16589 call save_paranoid
16590 TRACE_IRQS_OFF
16591 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16592 + testb $3, CS(%rsp)
16593 + jnz 1f
16594 + pax_enter_kernel
16595 + jmp 2f
16596 +1: pax_enter_kernel_user
16597 +2:
16598 +#else
16599 + pax_enter_kernel
16600 +#endif
16601 movq %rsp,%rdi /* pt_regs pointer */
16602 xorl %esi,%esi /* no error code */
16603 - PER_CPU(init_tss, %rbp)
16604 +#ifdef CONFIG_SMP
16605 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16606 + lea init_tss(%rbp), %rbp
16607 +#else
16608 + lea init_tss(%rip), %rbp
16609 +#endif
16610 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16611 call \do_sym
16612 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16613 jmp paranoid_exit /* %ebx: no swapgs flag */
16614 CFI_ENDPROC
16615 -END(\sym)
16616 +ENDPROC(\sym)
16617 .endm
16618
16619 .macro errorentry sym do_sym
16620 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16621 CFI_ADJUST_CFA_OFFSET 15*8
16622 call error_entry
16623 DEFAULT_FRAME 0
16624 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16625 + testb $3, CS(%rsp)
16626 + jnz 1f
16627 + pax_enter_kernel
16628 + jmp 2f
16629 +1: pax_enter_kernel_user
16630 +2:
16631 +#else
16632 + pax_enter_kernel
16633 +#endif
16634 movq %rsp,%rdi /* pt_regs pointer */
16635 movq ORIG_RAX(%rsp),%rsi /* get error code */
16636 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16637 call \do_sym
16638 jmp error_exit /* %ebx: no swapgs flag */
16639 CFI_ENDPROC
16640 -END(\sym)
16641 +ENDPROC(\sym)
16642 .endm
16643
16644 /* error code is on the stack already */
16645 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16646 call save_paranoid
16647 DEFAULT_FRAME 0
16648 TRACE_IRQS_OFF
16649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16650 + testb $3, CS(%rsp)
16651 + jnz 1f
16652 + pax_enter_kernel
16653 + jmp 2f
16654 +1: pax_enter_kernel_user
16655 +2:
16656 +#else
16657 + pax_enter_kernel
16658 +#endif
16659 movq %rsp,%rdi /* pt_regs pointer */
16660 movq ORIG_RAX(%rsp),%rsi /* get error code */
16661 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16662 call \do_sym
16663 jmp paranoid_exit /* %ebx: no swapgs flag */
16664 CFI_ENDPROC
16665 -END(\sym)
16666 +ENDPROC(\sym)
16667 .endm
16668
16669 zeroentry divide_error do_divide_error
16670 @@ -1141,9 +1509,10 @@ gs_change:
16671 SWAPGS
16672 popf
16673 CFI_ADJUST_CFA_OFFSET -8
16674 + pax_force_retaddr
16675 ret
16676 CFI_ENDPROC
16677 -END(native_load_gs_index)
16678 +ENDPROC(native_load_gs_index)
16679
16680 .section __ex_table,"a"
16681 .align 8
16682 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16683 * of hacks for example to fork off the per-CPU idle tasks.
16684 * [Hopefully no generic code relies on the reschedule -AK]
16685 */
16686 - RESTORE_ALL
16687 + RESTORE_REST
16688 UNFAKE_STACK_FRAME
16689 + pax_force_retaddr
16690 ret
16691 CFI_ENDPROC
16692 -END(kernel_thread)
16693 +ENDPROC(kernel_thread)
16694
16695 ENTRY(child_rip)
16696 pushq $0 # fake return address
16697 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16698 */
16699 movq %rdi, %rax
16700 movq %rsi, %rdi
16701 + pax_force_fptr %rax
16702 call *%rax
16703 # exit
16704 mov %eax, %edi
16705 call do_exit
16706 ud2 # padding for call trace
16707 CFI_ENDPROC
16708 -END(child_rip)
16709 +ENDPROC(child_rip)
16710
16711 /*
16712 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16713 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16714 RESTORE_REST
16715 testq %rax,%rax
16716 je int_ret_from_sys_call
16717 - RESTORE_ARGS
16718 UNFAKE_STACK_FRAME
16719 + pax_force_retaddr
16720 ret
16721 CFI_ENDPROC
16722 -END(kernel_execve)
16723 +ENDPROC(kernel_execve)
16724
16725 /* Call softirq on interrupt stack. Interrupts are off. */
16726 ENTRY(call_softirq)
16727 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16728 CFI_DEF_CFA_REGISTER rsp
16729 CFI_ADJUST_CFA_OFFSET -8
16730 decl PER_CPU_VAR(irq_count)
16731 + pax_force_retaddr
16732 ret
16733 CFI_ENDPROC
16734 -END(call_softirq)
16735 +ENDPROC(call_softirq)
16736
16737 #ifdef CONFIG_XEN
16738 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16739 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16740 decl PER_CPU_VAR(irq_count)
16741 jmp error_exit
16742 CFI_ENDPROC
16743 -END(xen_do_hypervisor_callback)
16744 +ENDPROC(xen_do_hypervisor_callback)
16745
16746 /*
16747 * Hypervisor uses this for application faults while it executes.
16748 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16749 SAVE_ALL
16750 jmp error_exit
16751 CFI_ENDPROC
16752 -END(xen_failsafe_callback)
16753 +ENDPROC(xen_failsafe_callback)
16754
16755 #endif /* CONFIG_XEN */
16756
16757 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16758 TRACE_IRQS_OFF
16759 testl %ebx,%ebx /* swapgs needed? */
16760 jnz paranoid_restore
16761 - testl $3,CS(%rsp)
16762 + testb $3,CS(%rsp)
16763 jnz paranoid_userspace
16764 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16765 + pax_exit_kernel
16766 + TRACE_IRQS_IRETQ 0
16767 + SWAPGS_UNSAFE_STACK
16768 + RESTORE_ALL 8
16769 + pax_force_retaddr_bts
16770 + jmp irq_return
16771 +#endif
16772 paranoid_swapgs:
16773 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16774 + pax_exit_kernel_user
16775 +#else
16776 + pax_exit_kernel
16777 +#endif
16778 TRACE_IRQS_IRETQ 0
16779 SWAPGS_UNSAFE_STACK
16780 RESTORE_ALL 8
16781 jmp irq_return
16782 paranoid_restore:
16783 + pax_exit_kernel
16784 TRACE_IRQS_IRETQ 0
16785 RESTORE_ALL 8
16786 + pax_force_retaddr_bts
16787 jmp irq_return
16788 paranoid_userspace:
16789 GET_THREAD_INFO(%rcx)
16790 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16791 TRACE_IRQS_OFF
16792 jmp paranoid_userspace
16793 CFI_ENDPROC
16794 -END(paranoid_exit)
16795 +ENDPROC(paranoid_exit)
16796
16797 /*
16798 * Exception entry point. This expects an error code/orig_rax on the stack.
16799 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16800 movq_cfi r14, R14+8
16801 movq_cfi r15, R15+8
16802 xorl %ebx,%ebx
16803 - testl $3,CS+8(%rsp)
16804 + testb $3,CS+8(%rsp)
16805 je error_kernelspace
16806 error_swapgs:
16807 SWAPGS
16808 error_sti:
16809 TRACE_IRQS_OFF
16810 + pax_force_retaddr_bts
16811 ret
16812 CFI_ENDPROC
16813
16814 @@ -1497,7 +1885,7 @@ error_kernelspace:
16815 cmpq $gs_change,RIP+8(%rsp)
16816 je error_swapgs
16817 jmp error_sti
16818 -END(error_entry)
16819 +ENDPROC(error_entry)
16820
16821
16822 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16823 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16824 jnz retint_careful
16825 jmp retint_swapgs
16826 CFI_ENDPROC
16827 -END(error_exit)
16828 +ENDPROC(error_exit)
16829
16830
16831 /* runs on exception stack */
16832 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16833 CFI_ADJUST_CFA_OFFSET 15*8
16834 call save_paranoid
16835 DEFAULT_FRAME 0
16836 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16837 + testb $3, CS(%rsp)
16838 + jnz 1f
16839 + pax_enter_kernel
16840 + jmp 2f
16841 +1: pax_enter_kernel_user
16842 +2:
16843 +#else
16844 + pax_enter_kernel
16845 +#endif
16846 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16847 movq %rsp,%rdi
16848 movq $-1,%rsi
16849 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16850 DISABLE_INTERRUPTS(CLBR_NONE)
16851 testl %ebx,%ebx /* swapgs needed? */
16852 jnz nmi_restore
16853 - testl $3,CS(%rsp)
16854 + testb $3,CS(%rsp)
16855 jnz nmi_userspace
16856 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16857 + pax_exit_kernel
16858 + SWAPGS_UNSAFE_STACK
16859 + RESTORE_ALL 8
16860 + pax_force_retaddr_bts
16861 + jmp irq_return
16862 +#endif
16863 nmi_swapgs:
16864 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16865 + pax_exit_kernel_user
16866 +#else
16867 + pax_exit_kernel
16868 +#endif
16869 SWAPGS_UNSAFE_STACK
16870 + RESTORE_ALL 8
16871 + jmp irq_return
16872 nmi_restore:
16873 + pax_exit_kernel
16874 RESTORE_ALL 8
16875 + pax_force_retaddr_bts
16876 jmp irq_return
16877 nmi_userspace:
16878 GET_THREAD_INFO(%rcx)
16879 @@ -1573,14 +1987,14 @@ nmi_schedule:
16880 jmp paranoid_exit
16881 CFI_ENDPROC
16882 #endif
16883 -END(nmi)
16884 +ENDPROC(nmi)
16885
16886 ENTRY(ignore_sysret)
16887 CFI_STARTPROC
16888 mov $-ENOSYS,%eax
16889 sysret
16890 CFI_ENDPROC
16891 -END(ignore_sysret)
16892 +ENDPROC(ignore_sysret)
16893
16894 /*
16895 * End of kprobes section
16896 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16897 index 9dbb527..7b3615a 100644
16898 --- a/arch/x86/kernel/ftrace.c
16899 +++ b/arch/x86/kernel/ftrace.c
16900 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16901 static void *mod_code_newcode; /* holds the text to write to the IP */
16902
16903 static unsigned nmi_wait_count;
16904 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16905 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16906
16907 int ftrace_arch_read_dyn_info(char *buf, int size)
16908 {
16909 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16910
16911 r = snprintf(buf, size, "%u %u",
16912 nmi_wait_count,
16913 - atomic_read(&nmi_update_count));
16914 + atomic_read_unchecked(&nmi_update_count));
16915 return r;
16916 }
16917
16918 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16919 {
16920 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16921 smp_rmb();
16922 + pax_open_kernel();
16923 ftrace_mod_code();
16924 - atomic_inc(&nmi_update_count);
16925 + pax_close_kernel();
16926 + atomic_inc_unchecked(&nmi_update_count);
16927 }
16928 /* Must have previous changes seen before executions */
16929 smp_mb();
16930 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16931
16932
16933
16934 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16935 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16936
16937 static unsigned char *ftrace_nop_replace(void)
16938 {
16939 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16940 {
16941 unsigned char replaced[MCOUNT_INSN_SIZE];
16942
16943 + ip = ktla_ktva(ip);
16944 +
16945 /*
16946 * Note: Due to modules and __init, code can
16947 * disappear and change, we need to protect against faulting
16948 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16949 unsigned char old[MCOUNT_INSN_SIZE], *new;
16950 int ret;
16951
16952 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16953 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16954 new = ftrace_call_replace(ip, (unsigned long)func);
16955 ret = ftrace_modify_code(ip, old, new);
16956
16957 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16958 switch (faulted) {
16959 case 0:
16960 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16961 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16962 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16963 break;
16964 case 1:
16965 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16966 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16967 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16968 break;
16969 case 2:
16970 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16971 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16972 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16973 break;
16974 }
16975
16976 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16977 {
16978 unsigned char code[MCOUNT_INSN_SIZE];
16979
16980 + ip = ktla_ktva(ip);
16981 +
16982 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16983 return -EFAULT;
16984
16985 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16986 index 4f8e250..df24706 100644
16987 --- a/arch/x86/kernel/head32.c
16988 +++ b/arch/x86/kernel/head32.c
16989 @@ -16,6 +16,7 @@
16990 #include <asm/apic.h>
16991 #include <asm/io_apic.h>
16992 #include <asm/bios_ebda.h>
16993 +#include <asm/boot.h>
16994
16995 static void __init i386_default_early_setup(void)
16996 {
16997 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16998 {
16999 reserve_trampoline_memory();
17000
17001 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
17002 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
17003
17004 #ifdef CONFIG_BLK_DEV_INITRD
17005 /* Reserve INITRD */
17006 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
17007 index 34c3308..6fc4e76 100644
17008 --- a/arch/x86/kernel/head_32.S
17009 +++ b/arch/x86/kernel/head_32.S
17010 @@ -19,10 +19,17 @@
17011 #include <asm/setup.h>
17012 #include <asm/processor-flags.h>
17013 #include <asm/percpu.h>
17014 +#include <asm/msr-index.h>
17015
17016 /* Physical address */
17017 #define pa(X) ((X) - __PAGE_OFFSET)
17018
17019 +#ifdef CONFIG_PAX_KERNEXEC
17020 +#define ta(X) (X)
17021 +#else
17022 +#define ta(X) ((X) - __PAGE_OFFSET)
17023 +#endif
17024 +
17025 /*
17026 * References to members of the new_cpu_data structure.
17027 */
17028 @@ -52,11 +59,7 @@
17029 * and small than max_low_pfn, otherwise will waste some page table entries
17030 */
17031
17032 -#if PTRS_PER_PMD > 1
17033 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17034 -#else
17035 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17036 -#endif
17037 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17038
17039 /* Enough space to fit pagetables for the low memory linear map */
17040 MAPPING_BEYOND_END = \
17041 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17042 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17043
17044 /*
17045 + * Real beginning of normal "text" segment
17046 + */
17047 +ENTRY(stext)
17048 +ENTRY(_stext)
17049 +
17050 +/*
17051 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17052 * %esi points to the real-mode code as a 32-bit pointer.
17053 * CS and DS must be 4 GB flat segments, but we don't depend on
17054 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17055 * can.
17056 */
17057 __HEAD
17058 +
17059 +#ifdef CONFIG_PAX_KERNEXEC
17060 + jmp startup_32
17061 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17062 +.fill PAGE_SIZE-5,1,0xcc
17063 +#endif
17064 +
17065 ENTRY(startup_32)
17066 + movl pa(stack_start),%ecx
17067 +
17068 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17069 us to not reload segments */
17070 testb $(1<<6), BP_loadflags(%esi)
17071 @@ -95,7 +113,60 @@ ENTRY(startup_32)
17072 movl %eax,%es
17073 movl %eax,%fs
17074 movl %eax,%gs
17075 + movl %eax,%ss
17076 2:
17077 + leal -__PAGE_OFFSET(%ecx),%esp
17078 +
17079 +#ifdef CONFIG_SMP
17080 + movl $pa(cpu_gdt_table),%edi
17081 + movl $__per_cpu_load,%eax
17082 + movw %ax,__KERNEL_PERCPU + 2(%edi)
17083 + rorl $16,%eax
17084 + movb %al,__KERNEL_PERCPU + 4(%edi)
17085 + movb %ah,__KERNEL_PERCPU + 7(%edi)
17086 + movl $__per_cpu_end - 1,%eax
17087 + subl $__per_cpu_start,%eax
17088 + movw %ax,__KERNEL_PERCPU + 0(%edi)
17089 +#endif
17090 +
17091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17092 + movl $NR_CPUS,%ecx
17093 + movl $pa(cpu_gdt_table),%edi
17094 +1:
17095 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17096 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17097 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17098 + addl $PAGE_SIZE_asm,%edi
17099 + loop 1b
17100 +#endif
17101 +
17102 +#ifdef CONFIG_PAX_KERNEXEC
17103 + movl $pa(boot_gdt),%edi
17104 + movl $__LOAD_PHYSICAL_ADDR,%eax
17105 + movw %ax,__BOOT_CS + 2(%edi)
17106 + rorl $16,%eax
17107 + movb %al,__BOOT_CS + 4(%edi)
17108 + movb %ah,__BOOT_CS + 7(%edi)
17109 + rorl $16,%eax
17110 +
17111 + ljmp $(__BOOT_CS),$1f
17112 +1:
17113 +
17114 + movl $NR_CPUS,%ecx
17115 + movl $pa(cpu_gdt_table),%edi
17116 + addl $__PAGE_OFFSET,%eax
17117 +1:
17118 + movw %ax,__KERNEL_CS + 2(%edi)
17119 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17120 + rorl $16,%eax
17121 + movb %al,__KERNEL_CS + 4(%edi)
17122 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17123 + movb %ah,__KERNEL_CS + 7(%edi)
17124 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17125 + rorl $16,%eax
17126 + addl $PAGE_SIZE_asm,%edi
17127 + loop 1b
17128 +#endif
17129
17130 /*
17131 * Clear BSS first so that there are no surprises...
17132 @@ -140,9 +211,7 @@ ENTRY(startup_32)
17133 cmpl $num_subarch_entries, %eax
17134 jae bad_subarch
17135
17136 - movl pa(subarch_entries)(,%eax,4), %eax
17137 - subl $__PAGE_OFFSET, %eax
17138 - jmp *%eax
17139 + jmp *pa(subarch_entries)(,%eax,4)
17140
17141 bad_subarch:
17142 WEAK(lguest_entry)
17143 @@ -154,10 +223,10 @@ WEAK(xen_entry)
17144 __INITDATA
17145
17146 subarch_entries:
17147 - .long default_entry /* normal x86/PC */
17148 - .long lguest_entry /* lguest hypervisor */
17149 - .long xen_entry /* Xen hypervisor */
17150 - .long default_entry /* Moorestown MID */
17151 + .long ta(default_entry) /* normal x86/PC */
17152 + .long ta(lguest_entry) /* lguest hypervisor */
17153 + .long ta(xen_entry) /* Xen hypervisor */
17154 + .long ta(default_entry) /* Moorestown MID */
17155 num_subarch_entries = (. - subarch_entries) / 4
17156 .previous
17157 #endif /* CONFIG_PARAVIRT */
17158 @@ -218,8 +287,11 @@ default_entry:
17159 movl %eax, pa(max_pfn_mapped)
17160
17161 /* Do early initialization of the fixmap area */
17162 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17163 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17164 +#ifdef CONFIG_COMPAT_VDSO
17165 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17166 +#else
17167 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17168 +#endif
17169 #else /* Not PAE */
17170
17171 page_pde_offset = (__PAGE_OFFSET >> 20);
17172 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17173 movl %eax, pa(max_pfn_mapped)
17174
17175 /* Do early initialization of the fixmap area */
17176 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17177 - movl %eax,pa(swapper_pg_dir+0xffc)
17178 +#ifdef CONFIG_COMPAT_VDSO
17179 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17180 +#else
17181 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17182 +#endif
17183 #endif
17184 jmp 3f
17185 /*
17186 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17187 movl %eax,%es
17188 movl %eax,%fs
17189 movl %eax,%gs
17190 + movl pa(stack_start),%ecx
17191 + movl %eax,%ss
17192 + leal -__PAGE_OFFSET(%ecx),%esp
17193 #endif /* CONFIG_SMP */
17194 3:
17195
17196 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17197 orl %edx,%eax
17198 movl %eax,%cr4
17199
17200 +#ifdef CONFIG_X86_PAE
17201 btl $5, %eax # check if PAE is enabled
17202 jnc 6f
17203
17204 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17205 cpuid
17206 cmpl $0x80000000, %eax
17207 jbe 6f
17208 +
17209 + /* Clear bogus XD_DISABLE bits */
17210 + call verify_cpu
17211 +
17212 mov $0x80000001, %eax
17213 cpuid
17214 /* Execute Disable bit supported? */
17215 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17216 jnc 6f
17217
17218 /* Setup EFER (Extended Feature Enable Register) */
17219 - movl $0xc0000080, %ecx
17220 + movl $MSR_EFER, %ecx
17221 rdmsr
17222
17223 btsl $11, %eax
17224 /* Make changes effective */
17225 wrmsr
17226
17227 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17228 + movl $1,pa(nx_enabled)
17229 +#endif
17230 +
17231 6:
17232
17233 /*
17234 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17235 movl %eax,%cr0 /* ..and set paging (PG) bit */
17236 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17237 1:
17238 - /* Set up the stack pointer */
17239 - lss stack_start,%esp
17240 + /* Shift the stack pointer to a virtual address */
17241 + addl $__PAGE_OFFSET, %esp
17242
17243 /*
17244 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17245 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17246
17247 #ifdef CONFIG_SMP
17248 cmpb $0, ready
17249 - jz 1f /* Initial CPU cleans BSS */
17250 - jmp checkCPUtype
17251 -1:
17252 + jnz checkCPUtype
17253 #endif /* CONFIG_SMP */
17254
17255 /*
17256 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17257 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17258 movl %eax,%ss # after changing gdt.
17259
17260 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17261 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17262 movl %eax,%ds
17263 movl %eax,%es
17264
17265 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17266 */
17267 cmpb $0,ready
17268 jne 1f
17269 - movl $per_cpu__gdt_page,%eax
17270 + movl $cpu_gdt_table,%eax
17271 movl $per_cpu__stack_canary,%ecx
17272 +#ifdef CONFIG_SMP
17273 + addl $__per_cpu_load,%ecx
17274 +#endif
17275 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17276 shrl $16, %ecx
17277 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17278 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17279 1:
17280 -#endif
17281 movl $(__KERNEL_STACK_CANARY),%eax
17282 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17283 + movl $(__USER_DS),%eax
17284 +#else
17285 + xorl %eax,%eax
17286 +#endif
17287 movl %eax,%gs
17288
17289 xorl %eax,%eax # Clear LDT
17290 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17291
17292 cld # gcc2 wants the direction flag cleared at all times
17293 pushl $0 # fake return address for unwinder
17294 -#ifdef CONFIG_SMP
17295 - movb ready, %cl
17296 movb $1, ready
17297 - cmpb $0,%cl # the first CPU calls start_kernel
17298 - je 1f
17299 - movl (stack_start), %esp
17300 -1:
17301 -#endif /* CONFIG_SMP */
17302 jmp *(initial_code)
17303
17304 /*
17305 @@ -546,22 +631,22 @@ early_page_fault:
17306 jmp early_fault
17307
17308 early_fault:
17309 - cld
17310 #ifdef CONFIG_PRINTK
17311 + cmpl $1,%ss:early_recursion_flag
17312 + je hlt_loop
17313 + incl %ss:early_recursion_flag
17314 + cld
17315 pusha
17316 movl $(__KERNEL_DS),%eax
17317 movl %eax,%ds
17318 movl %eax,%es
17319 - cmpl $2,early_recursion_flag
17320 - je hlt_loop
17321 - incl early_recursion_flag
17322 movl %cr2,%eax
17323 pushl %eax
17324 pushl %edx /* trapno */
17325 pushl $fault_msg
17326 call printk
17327 +; call dump_stack
17328 #endif
17329 - call dump_stack
17330 hlt_loop:
17331 hlt
17332 jmp hlt_loop
17333 @@ -569,8 +654,11 @@ hlt_loop:
17334 /* This is the default interrupt "handler" :-) */
17335 ALIGN
17336 ignore_int:
17337 - cld
17338 #ifdef CONFIG_PRINTK
17339 + cmpl $2,%ss:early_recursion_flag
17340 + je hlt_loop
17341 + incl %ss:early_recursion_flag
17342 + cld
17343 pushl %eax
17344 pushl %ecx
17345 pushl %edx
17346 @@ -579,9 +667,6 @@ ignore_int:
17347 movl $(__KERNEL_DS),%eax
17348 movl %eax,%ds
17349 movl %eax,%es
17350 - cmpl $2,early_recursion_flag
17351 - je hlt_loop
17352 - incl early_recursion_flag
17353 pushl 16(%esp)
17354 pushl 24(%esp)
17355 pushl 32(%esp)
17356 @@ -600,6 +685,8 @@ ignore_int:
17357 #endif
17358 iret
17359
17360 +#include "verify_cpu.S"
17361 +
17362 __REFDATA
17363 .align 4
17364 ENTRY(initial_code)
17365 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17366 /*
17367 * BSS section
17368 */
17369 -__PAGE_ALIGNED_BSS
17370 - .align PAGE_SIZE_asm
17371 #ifdef CONFIG_X86_PAE
17372 +.section .swapper_pg_pmd,"a",@progbits
17373 swapper_pg_pmd:
17374 .fill 1024*KPMDS,4,0
17375 #else
17376 +.section .swapper_pg_dir,"a",@progbits
17377 ENTRY(swapper_pg_dir)
17378 .fill 1024,4,0
17379 #endif
17380 +.section .swapper_pg_fixmap,"a",@progbits
17381 swapper_pg_fixmap:
17382 .fill 1024,4,0
17383 #ifdef CONFIG_X86_TRAMPOLINE
17384 +.section .trampoline_pg_dir,"a",@progbits
17385 ENTRY(trampoline_pg_dir)
17386 +#ifdef CONFIG_X86_PAE
17387 + .fill 4,8,0
17388 +#else
17389 .fill 1024,4,0
17390 #endif
17391 +#endif
17392 +
17393 +.section .empty_zero_page,"a",@progbits
17394 ENTRY(empty_zero_page)
17395 .fill 4096,1,0
17396
17397 /*
17398 + * The IDT has to be page-aligned to simplify the Pentium
17399 + * F0 0F bug workaround.. We have a special link segment
17400 + * for this.
17401 + */
17402 +.section .idt,"a",@progbits
17403 +ENTRY(idt_table)
17404 + .fill 256,8,0
17405 +
17406 +/*
17407 * This starts the data section.
17408 */
17409 #ifdef CONFIG_X86_PAE
17410 -__PAGE_ALIGNED_DATA
17411 - /* Page-aligned for the benefit of paravirt? */
17412 - .align PAGE_SIZE_asm
17413 +.section .swapper_pg_dir,"a",@progbits
17414 +
17415 ENTRY(swapper_pg_dir)
17416 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17417 # if KPMDS == 3
17418 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17419 # error "Kernel PMDs should be 1, 2 or 3"
17420 # endif
17421 .align PAGE_SIZE_asm /* needs to be page-sized too */
17422 +
17423 +#ifdef CONFIG_PAX_PER_CPU_PGD
17424 +ENTRY(cpu_pgd)
17425 + .rept NR_CPUS
17426 + .fill 4,8,0
17427 + .endr
17428 +#endif
17429 +
17430 #endif
17431
17432 .data
17433 +.balign 4
17434 ENTRY(stack_start)
17435 - .long init_thread_union+THREAD_SIZE
17436 - .long __BOOT_DS
17437 + .long init_thread_union+THREAD_SIZE-8
17438
17439 ready: .byte 0
17440
17441 +.section .rodata,"a",@progbits
17442 early_recursion_flag:
17443 .long 0
17444
17445 @@ -697,7 +809,7 @@ fault_msg:
17446 .word 0 # 32 bit align gdt_desc.address
17447 boot_gdt_descr:
17448 .word __BOOT_DS+7
17449 - .long boot_gdt - __PAGE_OFFSET
17450 + .long pa(boot_gdt)
17451
17452 .word 0 # 32-bit align idt_desc.address
17453 idt_descr:
17454 @@ -708,7 +820,7 @@ idt_descr:
17455 .word 0 # 32 bit align gdt_desc.address
17456 ENTRY(early_gdt_descr)
17457 .word GDT_ENTRIES*8-1
17458 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17459 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17460
17461 /*
17462 * The boot_gdt must mirror the equivalent in setup.S and is
17463 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17464 .align L1_CACHE_BYTES
17465 ENTRY(boot_gdt)
17466 .fill GDT_ENTRY_BOOT_CS,8,0
17467 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17468 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17469 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17470 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17471 +
17472 + .align PAGE_SIZE_asm
17473 +ENTRY(cpu_gdt_table)
17474 + .rept NR_CPUS
17475 + .quad 0x0000000000000000 /* NULL descriptor */
17476 + .quad 0x0000000000000000 /* 0x0b reserved */
17477 + .quad 0x0000000000000000 /* 0x13 reserved */
17478 + .quad 0x0000000000000000 /* 0x1b reserved */
17479 +
17480 +#ifdef CONFIG_PAX_KERNEXEC
17481 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17482 +#else
17483 + .quad 0x0000000000000000 /* 0x20 unused */
17484 +#endif
17485 +
17486 + .quad 0x0000000000000000 /* 0x28 unused */
17487 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17488 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17489 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17490 + .quad 0x0000000000000000 /* 0x4b reserved */
17491 + .quad 0x0000000000000000 /* 0x53 reserved */
17492 + .quad 0x0000000000000000 /* 0x5b reserved */
17493 +
17494 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17495 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17496 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17497 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17498 +
17499 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17500 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17501 +
17502 + /*
17503 + * Segments used for calling PnP BIOS have byte granularity.
17504 + * The code segments and data segments have fixed 64k limits,
17505 + * the transfer segment sizes are set at run time.
17506 + */
17507 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17508 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17509 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17510 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17511 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17512 +
17513 + /*
17514 + * The APM segments have byte granularity and their bases
17515 + * are set at run time. All have 64k limits.
17516 + */
17517 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17518 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17519 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17520 +
17521 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17522 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17523 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17524 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17525 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17526 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17527 +
17528 + /* Be sure this is zeroed to avoid false validations in Xen */
17529 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17530 + .endr
17531 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17532 index 780cd92..758b2a6 100644
17533 --- a/arch/x86/kernel/head_64.S
17534 +++ b/arch/x86/kernel/head_64.S
17535 @@ -19,6 +19,8 @@
17536 #include <asm/cache.h>
17537 #include <asm/processor-flags.h>
17538 #include <asm/percpu.h>
17539 +#include <asm/cpufeature.h>
17540 +#include <asm/alternative-asm.h>
17541
17542 #ifdef CONFIG_PARAVIRT
17543 #include <asm/asm-offsets.h>
17544 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17545 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17546 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17547 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17548 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17549 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17550 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17551 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17552 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17553 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17554
17555 .text
17556 __HEAD
17557 @@ -85,35 +93,23 @@ startup_64:
17558 */
17559 addq %rbp, init_level4_pgt + 0(%rip)
17560 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17561 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17562 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17563 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17564 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17565
17566 addq %rbp, level3_ident_pgt + 0(%rip)
17567 +#ifndef CONFIG_XEN
17568 + addq %rbp, level3_ident_pgt + 8(%rip)
17569 +#endif
17570
17571 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17572 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17573 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17574 +
17575 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17576 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17577
17578 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17579 -
17580 - /* Add an Identity mapping if I am above 1G */
17581 - leaq _text(%rip), %rdi
17582 - andq $PMD_PAGE_MASK, %rdi
17583 -
17584 - movq %rdi, %rax
17585 - shrq $PUD_SHIFT, %rax
17586 - andq $(PTRS_PER_PUD - 1), %rax
17587 - jz ident_complete
17588 -
17589 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17590 - leaq level3_ident_pgt(%rip), %rbx
17591 - movq %rdx, 0(%rbx, %rax, 8)
17592 -
17593 - movq %rdi, %rax
17594 - shrq $PMD_SHIFT, %rax
17595 - andq $(PTRS_PER_PMD - 1), %rax
17596 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17597 - leaq level2_spare_pgt(%rip), %rbx
17598 - movq %rdx, 0(%rbx, %rax, 8)
17599 -ident_complete:
17600 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17601
17602 /*
17603 * Fixup the kernel text+data virtual addresses. Note that
17604 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17605 * after the boot processor executes this code.
17606 */
17607
17608 - /* Enable PAE mode and PGE */
17609 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17610 + /* Enable PAE mode and PSE/PGE */
17611 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17612 movq %rax, %cr4
17613
17614 /* Setup early boot stage 4 level pagetables. */
17615 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17616 movl $MSR_EFER, %ecx
17617 rdmsr
17618 btsl $_EFER_SCE, %eax /* Enable System Call */
17619 - btl $20,%edi /* No Execute supported? */
17620 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17621 jnc 1f
17622 btsl $_EFER_NX, %eax
17623 + leaq init_level4_pgt(%rip), %rdi
17624 +#ifndef CONFIG_EFI
17625 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17626 +#endif
17627 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17628 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17629 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17630 1: wrmsr /* Make changes effective */
17631
17632 /* Setup cr0 */
17633 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17634 * jump. In addition we need to ensure %cs is set so we make this
17635 * a far return.
17636 */
17637 + pax_set_fptr_mask
17638 movq initial_code(%rip),%rax
17639 pushq $0 # fake return address to stop unwinder
17640 pushq $__KERNEL_CS # set correct cs
17641 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17642 .quad x86_64_start_kernel
17643 ENTRY(initial_gs)
17644 .quad INIT_PER_CPU_VAR(irq_stack_union)
17645 - __FINITDATA
17646
17647 ENTRY(stack_start)
17648 .quad init_thread_union+THREAD_SIZE-8
17649 .word 0
17650 + __FINITDATA
17651
17652 bad_address:
17653 jmp bad_address
17654
17655 - .section ".init.text","ax"
17656 + __INIT
17657 #ifdef CONFIG_EARLY_PRINTK
17658 .globl early_idt_handlers
17659 early_idt_handlers:
17660 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17661 #endif /* EARLY_PRINTK */
17662 1: hlt
17663 jmp 1b
17664 + .previous
17665
17666 #ifdef CONFIG_EARLY_PRINTK
17667 + __INITDATA
17668 early_recursion_flag:
17669 .long 0
17670 + .previous
17671
17672 + .section .rodata,"a",@progbits
17673 early_idt_msg:
17674 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17675 early_idt_ripmsg:
17676 .asciz "RIP %s\n"
17677 + .previous
17678 #endif /* CONFIG_EARLY_PRINTK */
17679 - .previous
17680
17681 + .section .rodata,"a",@progbits
17682 #define NEXT_PAGE(name) \
17683 .balign PAGE_SIZE; \
17684 ENTRY(name)
17685 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17686 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17687 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17688 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17689 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17690 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17691 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17692 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17693 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17694 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17695 .org init_level4_pgt + L4_START_KERNEL*8, 0
17696 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17697 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17698
17699 +#ifdef CONFIG_PAX_PER_CPU_PGD
17700 +NEXT_PAGE(cpu_pgd)
17701 + .rept NR_CPUS
17702 + .fill 512,8,0
17703 + .endr
17704 +#endif
17705 +
17706 NEXT_PAGE(level3_ident_pgt)
17707 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17708 +#ifdef CONFIG_XEN
17709 .fill 511,8,0
17710 +#else
17711 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17712 + .fill 510,8,0
17713 +#endif
17714 +
17715 +NEXT_PAGE(level3_vmalloc_start_pgt)
17716 + .fill 512,8,0
17717 +
17718 +NEXT_PAGE(level3_vmalloc_end_pgt)
17719 + .fill 512,8,0
17720 +
17721 +NEXT_PAGE(level3_vmemmap_pgt)
17722 + .fill L3_VMEMMAP_START,8,0
17723 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17724
17725 NEXT_PAGE(level3_kernel_pgt)
17726 .fill L3_START_KERNEL,8,0
17727 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17728 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17729 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17730
17731 +NEXT_PAGE(level2_vmemmap_pgt)
17732 + .fill 512,8,0
17733 +
17734 NEXT_PAGE(level2_fixmap_pgt)
17735 - .fill 506,8,0
17736 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17737 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17738 - .fill 5,8,0
17739 + .fill 507,8,0
17740 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17741 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17742 + .fill 4,8,0
17743
17744 -NEXT_PAGE(level1_fixmap_pgt)
17745 +NEXT_PAGE(level1_vsyscall_pgt)
17746 .fill 512,8,0
17747
17748 -NEXT_PAGE(level2_ident_pgt)
17749 - /* Since I easily can, map the first 1G.
17750 + /* Since I easily can, map the first 2G.
17751 * Don't set NX because code runs from these pages.
17752 */
17753 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17754 +NEXT_PAGE(level2_ident_pgt)
17755 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17756
17757 NEXT_PAGE(level2_kernel_pgt)
17758 /*
17759 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17760 * If you want to increase this then increase MODULES_VADDR
17761 * too.)
17762 */
17763 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17764 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17765 -
17766 -NEXT_PAGE(level2_spare_pgt)
17767 - .fill 512, 8, 0
17768 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17769
17770 #undef PMDS
17771 #undef NEXT_PAGE
17772
17773 - .data
17774 + .align PAGE_SIZE
17775 +ENTRY(cpu_gdt_table)
17776 + .rept NR_CPUS
17777 + .quad 0x0000000000000000 /* NULL descriptor */
17778 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17779 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17780 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17781 + .quad 0x00cffb000000ffff /* __USER32_CS */
17782 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17783 + .quad 0x00affb000000ffff /* __USER_CS */
17784 +
17785 +#ifdef CONFIG_PAX_KERNEXEC
17786 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17787 +#else
17788 + .quad 0x0 /* unused */
17789 +#endif
17790 +
17791 + .quad 0,0 /* TSS */
17792 + .quad 0,0 /* LDT */
17793 + .quad 0,0,0 /* three TLS descriptors */
17794 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17795 + /* asm/segment.h:GDT_ENTRIES must match this */
17796 +
17797 + /* zero the remaining page */
17798 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17799 + .endr
17800 +
17801 .align 16
17802 .globl early_gdt_descr
17803 early_gdt_descr:
17804 .word GDT_ENTRIES*8-1
17805 early_gdt_descr_base:
17806 - .quad INIT_PER_CPU_VAR(gdt_page)
17807 + .quad cpu_gdt_table
17808
17809 ENTRY(phys_base)
17810 /* This must match the first entry in level2_kernel_pgt */
17811 .quad 0x0000000000000000
17812
17813 #include "../../x86/xen/xen-head.S"
17814 -
17815 - .section .bss, "aw", @nobits
17816 +
17817 + .section .rodata,"a",@progbits
17818 .align L1_CACHE_BYTES
17819 ENTRY(idt_table)
17820 - .skip IDT_ENTRIES * 16
17821 + .fill 512,8,0
17822
17823 __PAGE_ALIGNED_BSS
17824 .align PAGE_SIZE
17825 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17826 index 9c3bd4a..e1d9b35 100644
17827 --- a/arch/x86/kernel/i386_ksyms_32.c
17828 +++ b/arch/x86/kernel/i386_ksyms_32.c
17829 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17830 EXPORT_SYMBOL(cmpxchg8b_emu);
17831 #endif
17832
17833 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17834 +
17835 /* Networking helper routines. */
17836 EXPORT_SYMBOL(csum_partial_copy_generic);
17837 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17838 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17839
17840 EXPORT_SYMBOL(__get_user_1);
17841 EXPORT_SYMBOL(__get_user_2);
17842 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17843
17844 EXPORT_SYMBOL(csum_partial);
17845 EXPORT_SYMBOL(empty_zero_page);
17846 +
17847 +#ifdef CONFIG_PAX_KERNEXEC
17848 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17849 +#endif
17850 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17851 index df89102..a244320 100644
17852 --- a/arch/x86/kernel/i8259.c
17853 +++ b/arch/x86/kernel/i8259.c
17854 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17855 "spurious 8259A interrupt: IRQ%d.\n", irq);
17856 spurious_irq_mask |= irqmask;
17857 }
17858 - atomic_inc(&irq_err_count);
17859 + atomic_inc_unchecked(&irq_err_count);
17860 /*
17861 * Theoretically we do not have to handle this IRQ,
17862 * but in Linux this does not cause problems and is
17863 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17864 index 3a54dcb..1c22348 100644
17865 --- a/arch/x86/kernel/init_task.c
17866 +++ b/arch/x86/kernel/init_task.c
17867 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17868 * way process stacks are handled. This is done by having a special
17869 * "init_task" linker map entry..
17870 */
17871 -union thread_union init_thread_union __init_task_data =
17872 - { INIT_THREAD_INFO(init_task) };
17873 +union thread_union init_thread_union __init_task_data;
17874
17875 /*
17876 * Initial task structure.
17877 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17878 * section. Since TSS's are completely CPU-local, we want them
17879 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17880 */
17881 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17882 -
17883 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17884 +EXPORT_SYMBOL(init_tss);
17885 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17886 index 99c4d30..74c84e9 100644
17887 --- a/arch/x86/kernel/ioport.c
17888 +++ b/arch/x86/kernel/ioport.c
17889 @@ -6,6 +6,7 @@
17890 #include <linux/sched.h>
17891 #include <linux/kernel.h>
17892 #include <linux/capability.h>
17893 +#include <linux/security.h>
17894 #include <linux/errno.h>
17895 #include <linux/types.h>
17896 #include <linux/ioport.h>
17897 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17898
17899 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17900 return -EINVAL;
17901 +#ifdef CONFIG_GRKERNSEC_IO
17902 + if (turn_on && grsec_disable_privio) {
17903 + gr_handle_ioperm();
17904 + return -EPERM;
17905 + }
17906 +#endif
17907 if (turn_on && !capable(CAP_SYS_RAWIO))
17908 return -EPERM;
17909
17910 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17911 * because the ->io_bitmap_max value must match the bitmap
17912 * contents:
17913 */
17914 - tss = &per_cpu(init_tss, get_cpu());
17915 + tss = init_tss + get_cpu();
17916
17917 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17918
17919 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17920 return -EINVAL;
17921 /* Trying to gain more privileges? */
17922 if (level > old) {
17923 +#ifdef CONFIG_GRKERNSEC_IO
17924 + if (grsec_disable_privio) {
17925 + gr_handle_iopl();
17926 + return -EPERM;
17927 + }
17928 +#endif
17929 if (!capable(CAP_SYS_RAWIO))
17930 return -EPERM;
17931 }
17932 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17933 index 04bbd52..83a07d9 100644
17934 --- a/arch/x86/kernel/irq.c
17935 +++ b/arch/x86/kernel/irq.c
17936 @@ -15,7 +15,7 @@
17937 #include <asm/mce.h>
17938 #include <asm/hw_irq.h>
17939
17940 -atomic_t irq_err_count;
17941 +atomic_unchecked_t irq_err_count;
17942
17943 /* Function pointer for generic interrupt vector handling */
17944 void (*generic_interrupt_extension)(void) = NULL;
17945 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17946 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17947 seq_printf(p, " Machine check polls\n");
17948 #endif
17949 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17950 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17951 #if defined(CONFIG_X86_IO_APIC)
17952 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17953 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17954 #endif
17955 return 0;
17956 }
17957 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17958
17959 u64 arch_irq_stat(void)
17960 {
17961 - u64 sum = atomic_read(&irq_err_count);
17962 + u64 sum = atomic_read_unchecked(&irq_err_count);
17963
17964 #ifdef CONFIG_X86_IO_APIC
17965 - sum += atomic_read(&irq_mis_count);
17966 + sum += atomic_read_unchecked(&irq_mis_count);
17967 #endif
17968 return sum;
17969 }
17970 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17971 index 7d35d0f..03f1d52 100644
17972 --- a/arch/x86/kernel/irq_32.c
17973 +++ b/arch/x86/kernel/irq_32.c
17974 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17975 __asm__ __volatile__("andl %%esp,%0" :
17976 "=r" (sp) : "0" (THREAD_SIZE - 1));
17977
17978 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17979 + return sp < STACK_WARN;
17980 }
17981
17982 static void print_stack_overflow(void)
17983 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17984 * per-CPU IRQ handling contexts (thread information and stack)
17985 */
17986 union irq_ctx {
17987 - struct thread_info tinfo;
17988 - u32 stack[THREAD_SIZE/sizeof(u32)];
17989 -} __attribute__((aligned(PAGE_SIZE)));
17990 + unsigned long previous_esp;
17991 + u32 stack[THREAD_SIZE/sizeof(u32)];
17992 +} __attribute__((aligned(THREAD_SIZE)));
17993
17994 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17995 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17996 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17997 static inline int
17998 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17999 {
18000 - union irq_ctx *curctx, *irqctx;
18001 + union irq_ctx *irqctx;
18002 u32 *isp, arg1, arg2;
18003
18004 - curctx = (union irq_ctx *) current_thread_info();
18005 irqctx = __get_cpu_var(hardirq_ctx);
18006
18007 /*
18008 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18009 * handler) we can't do that and just have to keep using the
18010 * current stack (which is the irq stack already after all)
18011 */
18012 - if (unlikely(curctx == irqctx))
18013 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
18014 return 0;
18015
18016 /* build the stack frame on the IRQ stack */
18017 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18018 - irqctx->tinfo.task = curctx->tinfo.task;
18019 - irqctx->tinfo.previous_esp = current_stack_pointer;
18020 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18021 + irqctx->previous_esp = current_stack_pointer;
18022
18023 - /*
18024 - * Copy the softirq bits in preempt_count so that the
18025 - * softirq checks work in the hardirq context.
18026 - */
18027 - irqctx->tinfo.preempt_count =
18028 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18029 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18030 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18031 + __set_fs(MAKE_MM_SEG(0));
18032 +#endif
18033
18034 if (unlikely(overflow))
18035 call_on_stack(print_stack_overflow, isp);
18036 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18037 : "0" (irq), "1" (desc), "2" (isp),
18038 "D" (desc->handle_irq)
18039 : "memory", "cc", "ecx");
18040 +
18041 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18042 + __set_fs(current_thread_info()->addr_limit);
18043 +#endif
18044 +
18045 return 1;
18046 }
18047
18048 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18049 */
18050 void __cpuinit irq_ctx_init(int cpu)
18051 {
18052 - union irq_ctx *irqctx;
18053 -
18054 if (per_cpu(hardirq_ctx, cpu))
18055 return;
18056
18057 - irqctx = &per_cpu(hardirq_stack, cpu);
18058 - irqctx->tinfo.task = NULL;
18059 - irqctx->tinfo.exec_domain = NULL;
18060 - irqctx->tinfo.cpu = cpu;
18061 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18062 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18063 -
18064 - per_cpu(hardirq_ctx, cpu) = irqctx;
18065 -
18066 - irqctx = &per_cpu(softirq_stack, cpu);
18067 - irqctx->tinfo.task = NULL;
18068 - irqctx->tinfo.exec_domain = NULL;
18069 - irqctx->tinfo.cpu = cpu;
18070 - irqctx->tinfo.preempt_count = 0;
18071 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18072 -
18073 - per_cpu(softirq_ctx, cpu) = irqctx;
18074 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18075 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18076
18077 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18078 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18079 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18080 asmlinkage void do_softirq(void)
18081 {
18082 unsigned long flags;
18083 - struct thread_info *curctx;
18084 union irq_ctx *irqctx;
18085 u32 *isp;
18086
18087 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18088 local_irq_save(flags);
18089
18090 if (local_softirq_pending()) {
18091 - curctx = current_thread_info();
18092 irqctx = __get_cpu_var(softirq_ctx);
18093 - irqctx->tinfo.task = curctx->task;
18094 - irqctx->tinfo.previous_esp = current_stack_pointer;
18095 + irqctx->previous_esp = current_stack_pointer;
18096
18097 /* build the stack frame on the softirq stack */
18098 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18099 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18100 +
18101 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18102 + __set_fs(MAKE_MM_SEG(0));
18103 +#endif
18104
18105 call_on_stack(__do_softirq, isp);
18106 +
18107 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18108 + __set_fs(current_thread_info()->addr_limit);
18109 +#endif
18110 +
18111 /*
18112 * Shouldnt happen, we returned above if in_interrupt():
18113 */
18114 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18115 index 8d82a77..0baf312 100644
18116 --- a/arch/x86/kernel/kgdb.c
18117 +++ b/arch/x86/kernel/kgdb.c
18118 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18119
18120 /* clear the trace bit */
18121 linux_regs->flags &= ~X86_EFLAGS_TF;
18122 - atomic_set(&kgdb_cpu_doing_single_step, -1);
18123 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18124
18125 /* set the trace bit if we're stepping */
18126 if (remcomInBuffer[0] == 's') {
18127 linux_regs->flags |= X86_EFLAGS_TF;
18128 kgdb_single_step = 1;
18129 - atomic_set(&kgdb_cpu_doing_single_step,
18130 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18131 raw_smp_processor_id());
18132 }
18133
18134 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18135 break;
18136
18137 case DIE_DEBUG:
18138 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
18139 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18140 raw_smp_processor_id()) {
18141 if (user_mode(regs))
18142 return single_step_cont(regs, args);
18143 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18144 return instruction_pointer(regs);
18145 }
18146
18147 -struct kgdb_arch arch_kgdb_ops = {
18148 +const struct kgdb_arch arch_kgdb_ops = {
18149 /* Breakpoint instruction: */
18150 .gdb_bpt_instr = { 0xcc },
18151 .flags = KGDB_HW_BREAKPOINT,
18152 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18153 index 7a67820..70ea187 100644
18154 --- a/arch/x86/kernel/kprobes.c
18155 +++ b/arch/x86/kernel/kprobes.c
18156 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18157 char op;
18158 s32 raddr;
18159 } __attribute__((packed)) * jop;
18160 - jop = (struct __arch_jmp_op *)from;
18161 +
18162 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18163 +
18164 + pax_open_kernel();
18165 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18166 jop->op = RELATIVEJUMP_INSTRUCTION;
18167 + pax_close_kernel();
18168 }
18169
18170 /*
18171 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18172 kprobe_opcode_t opcode;
18173 kprobe_opcode_t *orig_opcodes = opcodes;
18174
18175 - if (search_exception_tables((unsigned long)opcodes))
18176 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18177 return 0; /* Page fault may occur on this address. */
18178
18179 retry:
18180 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18181 disp = (u8 *) p->addr + *((s32 *) insn) -
18182 (u8 *) p->ainsn.insn;
18183 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18184 + pax_open_kernel();
18185 *(s32 *)insn = (s32) disp;
18186 + pax_close_kernel();
18187 }
18188 }
18189 #endif
18190 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18191
18192 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18193 {
18194 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18195 + pax_open_kernel();
18196 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18197 + pax_close_kernel();
18198
18199 fix_riprel(p);
18200
18201 - if (can_boost(p->addr))
18202 + if (can_boost(ktla_ktva(p->addr)))
18203 p->ainsn.boostable = 0;
18204 else
18205 p->ainsn.boostable = -1;
18206
18207 - p->opcode = *p->addr;
18208 + p->opcode = *(ktla_ktva(p->addr));
18209 }
18210
18211 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18212 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18213 if (p->opcode == BREAKPOINT_INSTRUCTION)
18214 regs->ip = (unsigned long)p->addr;
18215 else
18216 - regs->ip = (unsigned long)p->ainsn.insn;
18217 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18218 }
18219
18220 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18221 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18222 if (p->ainsn.boostable == 1 && !p->post_handler) {
18223 /* Boost up -- we can execute copied instructions directly */
18224 reset_current_kprobe();
18225 - regs->ip = (unsigned long)p->ainsn.insn;
18226 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18227 preempt_enable_no_resched();
18228 return;
18229 }
18230 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18231 struct kprobe_ctlblk *kcb;
18232
18233 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18234 - if (*addr != BREAKPOINT_INSTRUCTION) {
18235 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18236 /*
18237 * The breakpoint instruction was removed right
18238 * after we hit it. Another cpu has removed
18239 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18240 /* Skip orig_ax, ip, cs */
18241 " addq $24, %rsp\n"
18242 " popfq\n"
18243 +#ifdef KERNEXEC_PLUGIN
18244 + " btsq $63,(%rsp)\n"
18245 +#endif
18246 #else
18247 " pushf\n"
18248 /*
18249 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18250 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18251 {
18252 unsigned long *tos = stack_addr(regs);
18253 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18254 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18255 unsigned long orig_ip = (unsigned long)p->addr;
18256 kprobe_opcode_t *insn = p->ainsn.insn;
18257
18258 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18259 struct die_args *args = data;
18260 int ret = NOTIFY_DONE;
18261
18262 - if (args->regs && user_mode_vm(args->regs))
18263 + if (args->regs && user_mode(args->regs))
18264 return ret;
18265
18266 switch (val) {
18267 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18268 index 63b0ec8..6d92227 100644
18269 --- a/arch/x86/kernel/kvm.c
18270 +++ b/arch/x86/kernel/kvm.c
18271 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18272 pv_mmu_ops.set_pud = kvm_set_pud;
18273 #if PAGETABLE_LEVELS == 4
18274 pv_mmu_ops.set_pgd = kvm_set_pgd;
18275 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18276 #endif
18277 #endif
18278 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18279 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18280 index ec6ef60..ab2c824 100644
18281 --- a/arch/x86/kernel/ldt.c
18282 +++ b/arch/x86/kernel/ldt.c
18283 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18284 if (reload) {
18285 #ifdef CONFIG_SMP
18286 preempt_disable();
18287 - load_LDT(pc);
18288 + load_LDT_nolock(pc);
18289 if (!cpumask_equal(mm_cpumask(current->mm),
18290 cpumask_of(smp_processor_id())))
18291 smp_call_function(flush_ldt, current->mm, 1);
18292 preempt_enable();
18293 #else
18294 - load_LDT(pc);
18295 + load_LDT_nolock(pc);
18296 #endif
18297 }
18298 if (oldsize) {
18299 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18300 return err;
18301
18302 for (i = 0; i < old->size; i++)
18303 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18304 + write_ldt_entry(new->ldt, i, old->ldt + i);
18305 return 0;
18306 }
18307
18308 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18309 retval = copy_ldt(&mm->context, &old_mm->context);
18310 mutex_unlock(&old_mm->context.lock);
18311 }
18312 +
18313 + if (tsk == current) {
18314 + mm->context.vdso = 0;
18315 +
18316 +#ifdef CONFIG_X86_32
18317 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18318 + mm->context.user_cs_base = 0UL;
18319 + mm->context.user_cs_limit = ~0UL;
18320 +
18321 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18322 + cpus_clear(mm->context.cpu_user_cs_mask);
18323 +#endif
18324 +
18325 +#endif
18326 +#endif
18327 +
18328 + }
18329 +
18330 return retval;
18331 }
18332
18333 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18334 }
18335 }
18336
18337 +#ifdef CONFIG_PAX_SEGMEXEC
18338 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18339 + error = -EINVAL;
18340 + goto out_unlock;
18341 + }
18342 +#endif
18343 +
18344 fill_ldt(&ldt, &ldt_info);
18345 if (oldmode)
18346 ldt.avl = 0;
18347 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18348 index c1c429d..f02eaf9 100644
18349 --- a/arch/x86/kernel/machine_kexec_32.c
18350 +++ b/arch/x86/kernel/machine_kexec_32.c
18351 @@ -26,7 +26,7 @@
18352 #include <asm/system.h>
18353 #include <asm/cacheflush.h>
18354
18355 -static void set_idt(void *newidt, __u16 limit)
18356 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18357 {
18358 struct desc_ptr curidt;
18359
18360 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18361 }
18362
18363
18364 -static void set_gdt(void *newgdt, __u16 limit)
18365 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18366 {
18367 struct desc_ptr curgdt;
18368
18369 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18370 }
18371
18372 control_page = page_address(image->control_code_page);
18373 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18374 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18375
18376 relocate_kernel_ptr = control_page;
18377 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18378 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18379 index 1e47679..e73449d 100644
18380 --- a/arch/x86/kernel/microcode_amd.c
18381 +++ b/arch/x86/kernel/microcode_amd.c
18382 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18383 uci->mc = NULL;
18384 }
18385
18386 -static struct microcode_ops microcode_amd_ops = {
18387 +static const struct microcode_ops microcode_amd_ops = {
18388 .request_microcode_user = request_microcode_user,
18389 .request_microcode_fw = request_microcode_fw,
18390 .collect_cpu_info = collect_cpu_info_amd,
18391 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18392 .microcode_fini_cpu = microcode_fini_cpu_amd,
18393 };
18394
18395 -struct microcode_ops * __init init_amd_microcode(void)
18396 +const struct microcode_ops * __init init_amd_microcode(void)
18397 {
18398 return &microcode_amd_ops;
18399 }
18400 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18401 index 378e9a8..b5a6ea9 100644
18402 --- a/arch/x86/kernel/microcode_core.c
18403 +++ b/arch/x86/kernel/microcode_core.c
18404 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18405
18406 #define MICROCODE_VERSION "2.00"
18407
18408 -static struct microcode_ops *microcode_ops;
18409 +static const struct microcode_ops *microcode_ops;
18410
18411 /*
18412 * Synchronization.
18413 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18414 index 0d334dd..14cedaf 100644
18415 --- a/arch/x86/kernel/microcode_intel.c
18416 +++ b/arch/x86/kernel/microcode_intel.c
18417 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18418
18419 static int get_ucode_user(void *to, const void *from, size_t n)
18420 {
18421 - return copy_from_user(to, from, n);
18422 + return copy_from_user(to, (const void __force_user *)from, n);
18423 }
18424
18425 static enum ucode_state
18426 request_microcode_user(int cpu, const void __user *buf, size_t size)
18427 {
18428 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18429 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18430 }
18431
18432 static void microcode_fini_cpu(int cpu)
18433 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18434 uci->mc = NULL;
18435 }
18436
18437 -static struct microcode_ops microcode_intel_ops = {
18438 +static const struct microcode_ops microcode_intel_ops = {
18439 .request_microcode_user = request_microcode_user,
18440 .request_microcode_fw = request_microcode_fw,
18441 .collect_cpu_info = collect_cpu_info,
18442 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18443 .microcode_fini_cpu = microcode_fini_cpu,
18444 };
18445
18446 -struct microcode_ops * __init init_intel_microcode(void)
18447 +const struct microcode_ops * __init init_intel_microcode(void)
18448 {
18449 return &microcode_intel_ops;
18450 }
18451 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18452 index 89f386f..9028f51 100644
18453 --- a/arch/x86/kernel/module.c
18454 +++ b/arch/x86/kernel/module.c
18455 @@ -34,7 +34,7 @@
18456 #define DEBUGP(fmt...)
18457 #endif
18458
18459 -void *module_alloc(unsigned long size)
18460 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18461 {
18462 struct vm_struct *area;
18463
18464 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18465 if (!area)
18466 return NULL;
18467
18468 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18469 - PAGE_KERNEL_EXEC);
18470 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18471 +}
18472 +
18473 +void *module_alloc(unsigned long size)
18474 +{
18475 +
18476 +#ifdef CONFIG_PAX_KERNEXEC
18477 + return __module_alloc(size, PAGE_KERNEL);
18478 +#else
18479 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18480 +#endif
18481 +
18482 }
18483
18484 /* Free memory returned from module_alloc */
18485 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18486 vfree(module_region);
18487 }
18488
18489 +#ifdef CONFIG_PAX_KERNEXEC
18490 +#ifdef CONFIG_X86_32
18491 +void *module_alloc_exec(unsigned long size)
18492 +{
18493 + struct vm_struct *area;
18494 +
18495 + if (size == 0)
18496 + return NULL;
18497 +
18498 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18499 + return area ? area->addr : NULL;
18500 +}
18501 +EXPORT_SYMBOL(module_alloc_exec);
18502 +
18503 +void module_free_exec(struct module *mod, void *module_region)
18504 +{
18505 + vunmap(module_region);
18506 +}
18507 +EXPORT_SYMBOL(module_free_exec);
18508 +#else
18509 +void module_free_exec(struct module *mod, void *module_region)
18510 +{
18511 + module_free(mod, module_region);
18512 +}
18513 +EXPORT_SYMBOL(module_free_exec);
18514 +
18515 +void *module_alloc_exec(unsigned long size)
18516 +{
18517 + return __module_alloc(size, PAGE_KERNEL_RX);
18518 +}
18519 +EXPORT_SYMBOL(module_alloc_exec);
18520 +#endif
18521 +#endif
18522 +
18523 /* We don't need anything special. */
18524 int module_frob_arch_sections(Elf_Ehdr *hdr,
18525 Elf_Shdr *sechdrs,
18526 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18527 unsigned int i;
18528 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18529 Elf32_Sym *sym;
18530 - uint32_t *location;
18531 + uint32_t *plocation, location;
18532
18533 DEBUGP("Applying relocate section %u to %u\n", relsec,
18534 sechdrs[relsec].sh_info);
18535 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18536 /* This is where to make the change */
18537 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18538 - + rel[i].r_offset;
18539 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18540 + location = (uint32_t)plocation;
18541 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18542 + plocation = ktla_ktva((void *)plocation);
18543 /* This is the symbol it is referring to. Note that all
18544 undefined symbols have been resolved. */
18545 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18546 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18547 switch (ELF32_R_TYPE(rel[i].r_info)) {
18548 case R_386_32:
18549 /* We add the value into the location given */
18550 - *location += sym->st_value;
18551 + pax_open_kernel();
18552 + *plocation += sym->st_value;
18553 + pax_close_kernel();
18554 break;
18555 case R_386_PC32:
18556 /* Add the value, subtract its postition */
18557 - *location += sym->st_value - (uint32_t)location;
18558 + pax_open_kernel();
18559 + *plocation += sym->st_value - location;
18560 + pax_close_kernel();
18561 break;
18562 default:
18563 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18564 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18565 case R_X86_64_NONE:
18566 break;
18567 case R_X86_64_64:
18568 + pax_open_kernel();
18569 *(u64 *)loc = val;
18570 + pax_close_kernel();
18571 break;
18572 case R_X86_64_32:
18573 + pax_open_kernel();
18574 *(u32 *)loc = val;
18575 + pax_close_kernel();
18576 if (val != *(u32 *)loc)
18577 goto overflow;
18578 break;
18579 case R_X86_64_32S:
18580 + pax_open_kernel();
18581 *(s32 *)loc = val;
18582 + pax_close_kernel();
18583 if ((s64)val != *(s32 *)loc)
18584 goto overflow;
18585 break;
18586 case R_X86_64_PC32:
18587 val -= (u64)loc;
18588 + pax_open_kernel();
18589 *(u32 *)loc = val;
18590 + pax_close_kernel();
18591 +
18592 #if 0
18593 if ((s64)val != *(s32 *)loc)
18594 goto overflow;
18595 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18596 index 3a7c5a4..9191528 100644
18597 --- a/arch/x86/kernel/paravirt-spinlocks.c
18598 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18599 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18600 __raw_spin_lock(lock);
18601 }
18602
18603 -struct pv_lock_ops pv_lock_ops = {
18604 +struct pv_lock_ops pv_lock_ops __read_only = {
18605 #ifdef CONFIG_SMP
18606 .spin_is_locked = __ticket_spin_is_locked,
18607 .spin_is_contended = __ticket_spin_is_contended,
18608 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18609 index 1b1739d..dea6077 100644
18610 --- a/arch/x86/kernel/paravirt.c
18611 +++ b/arch/x86/kernel/paravirt.c
18612 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18613 {
18614 return x;
18615 }
18616 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18617 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18618 +#endif
18619
18620 void __init default_banner(void)
18621 {
18622 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18623 * corresponding structure. */
18624 static void *get_call_destination(u8 type)
18625 {
18626 - struct paravirt_patch_template tmpl = {
18627 + const struct paravirt_patch_template tmpl = {
18628 .pv_init_ops = pv_init_ops,
18629 .pv_time_ops = pv_time_ops,
18630 .pv_cpu_ops = pv_cpu_ops,
18631 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18632 .pv_lock_ops = pv_lock_ops,
18633 #endif
18634 };
18635 +
18636 + pax_track_stack();
18637 return *((void **)&tmpl + type);
18638 }
18639
18640 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18641 if (opfunc == NULL)
18642 /* If there's no function, patch it with a ud2a (BUG) */
18643 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18644 - else if (opfunc == _paravirt_nop)
18645 + else if (opfunc == (void *)_paravirt_nop)
18646 /* If the operation is a nop, then nop the callsite */
18647 ret = paravirt_patch_nop();
18648
18649 /* identity functions just return their single argument */
18650 - else if (opfunc == _paravirt_ident_32)
18651 + else if (opfunc == (void *)_paravirt_ident_32)
18652 ret = paravirt_patch_ident_32(insnbuf, len);
18653 - else if (opfunc == _paravirt_ident_64)
18654 + else if (opfunc == (void *)_paravirt_ident_64)
18655 ret = paravirt_patch_ident_64(insnbuf, len);
18656 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18657 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18658 + ret = paravirt_patch_ident_64(insnbuf, len);
18659 +#endif
18660
18661 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18662 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18663 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18664 if (insn_len > len || start == NULL)
18665 insn_len = len;
18666 else
18667 - memcpy(insnbuf, start, insn_len);
18668 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18669
18670 return insn_len;
18671 }
18672 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18673 preempt_enable();
18674 }
18675
18676 -struct pv_info pv_info = {
18677 +struct pv_info pv_info __read_only = {
18678 .name = "bare hardware",
18679 .paravirt_enabled = 0,
18680 .kernel_rpl = 0,
18681 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18682 };
18683
18684 -struct pv_init_ops pv_init_ops = {
18685 +struct pv_init_ops pv_init_ops __read_only = {
18686 .patch = native_patch,
18687 };
18688
18689 -struct pv_time_ops pv_time_ops = {
18690 +struct pv_time_ops pv_time_ops __read_only = {
18691 .sched_clock = native_sched_clock,
18692 };
18693
18694 -struct pv_irq_ops pv_irq_ops = {
18695 +struct pv_irq_ops pv_irq_ops __read_only = {
18696 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18697 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18698 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18699 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18700 #endif
18701 };
18702
18703 -struct pv_cpu_ops pv_cpu_ops = {
18704 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18705 .cpuid = native_cpuid,
18706 .get_debugreg = native_get_debugreg,
18707 .set_debugreg = native_set_debugreg,
18708 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18709 .end_context_switch = paravirt_nop,
18710 };
18711
18712 -struct pv_apic_ops pv_apic_ops = {
18713 +struct pv_apic_ops pv_apic_ops __read_only = {
18714 #ifdef CONFIG_X86_LOCAL_APIC
18715 .startup_ipi_hook = paravirt_nop,
18716 #endif
18717 };
18718
18719 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18720 +#ifdef CONFIG_X86_32
18721 +#ifdef CONFIG_X86_PAE
18722 +/* 64-bit pagetable entries */
18723 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18724 +#else
18725 /* 32-bit pagetable entries */
18726 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18727 +#endif
18728 #else
18729 /* 64-bit pagetable entries */
18730 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18731 #endif
18732
18733 -struct pv_mmu_ops pv_mmu_ops = {
18734 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18735
18736 .read_cr2 = native_read_cr2,
18737 .write_cr2 = native_write_cr2,
18738 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18739 .make_pud = PTE_IDENT,
18740
18741 .set_pgd = native_set_pgd,
18742 + .set_pgd_batched = native_set_pgd_batched,
18743 #endif
18744 #endif /* PAGETABLE_LEVELS >= 3 */
18745
18746 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18747 },
18748
18749 .set_fixmap = native_set_fixmap,
18750 +
18751 +#ifdef CONFIG_PAX_KERNEXEC
18752 + .pax_open_kernel = native_pax_open_kernel,
18753 + .pax_close_kernel = native_pax_close_kernel,
18754 +#endif
18755 +
18756 };
18757
18758 EXPORT_SYMBOL_GPL(pv_time_ops);
18759 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18760 index 1a2d4b1..6a0dd55 100644
18761 --- a/arch/x86/kernel/pci-calgary_64.c
18762 +++ b/arch/x86/kernel/pci-calgary_64.c
18763 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18764 free_pages((unsigned long)vaddr, get_order(size));
18765 }
18766
18767 -static struct dma_map_ops calgary_dma_ops = {
18768 +static const struct dma_map_ops calgary_dma_ops = {
18769 .alloc_coherent = calgary_alloc_coherent,
18770 .free_coherent = calgary_free_coherent,
18771 .map_sg = calgary_map_sg,
18772 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18773 index 6ac3931..42b4414 100644
18774 --- a/arch/x86/kernel/pci-dma.c
18775 +++ b/arch/x86/kernel/pci-dma.c
18776 @@ -14,7 +14,7 @@
18777
18778 static int forbid_dac __read_mostly;
18779
18780 -struct dma_map_ops *dma_ops;
18781 +const struct dma_map_ops *dma_ops;
18782 EXPORT_SYMBOL(dma_ops);
18783
18784 static int iommu_sac_force __read_mostly;
18785 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18786
18787 int dma_supported(struct device *dev, u64 mask)
18788 {
18789 - struct dma_map_ops *ops = get_dma_ops(dev);
18790 + const struct dma_map_ops *ops = get_dma_ops(dev);
18791
18792 #ifdef CONFIG_PCI
18793 if (mask > 0xffffffff && forbid_dac > 0) {
18794 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18795 index 1c76691..e3632db 100644
18796 --- a/arch/x86/kernel/pci-gart_64.c
18797 +++ b/arch/x86/kernel/pci-gart_64.c
18798 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18799 return -1;
18800 }
18801
18802 -static struct dma_map_ops gart_dma_ops = {
18803 +static const struct dma_map_ops gart_dma_ops = {
18804 .map_sg = gart_map_sg,
18805 .unmap_sg = gart_unmap_sg,
18806 .map_page = gart_map_page,
18807 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18808 index a3933d4..c898869 100644
18809 --- a/arch/x86/kernel/pci-nommu.c
18810 +++ b/arch/x86/kernel/pci-nommu.c
18811 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18812 flush_write_buffers();
18813 }
18814
18815 -struct dma_map_ops nommu_dma_ops = {
18816 +const struct dma_map_ops nommu_dma_ops = {
18817 .alloc_coherent = dma_generic_alloc_coherent,
18818 .free_coherent = nommu_free_coherent,
18819 .map_sg = nommu_map_sg,
18820 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18821 index aaa6b78..4de1881 100644
18822 --- a/arch/x86/kernel/pci-swiotlb.c
18823 +++ b/arch/x86/kernel/pci-swiotlb.c
18824 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18825 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18826 }
18827
18828 -static struct dma_map_ops swiotlb_dma_ops = {
18829 +static const struct dma_map_ops swiotlb_dma_ops = {
18830 .mapping_error = swiotlb_dma_mapping_error,
18831 .alloc_coherent = x86_swiotlb_alloc_coherent,
18832 .free_coherent = swiotlb_free_coherent,
18833 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18834 index fc6c84d..0312ca2 100644
18835 --- a/arch/x86/kernel/process.c
18836 +++ b/arch/x86/kernel/process.c
18837 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18838
18839 void free_thread_info(struct thread_info *ti)
18840 {
18841 - free_thread_xstate(ti->task);
18842 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18843 }
18844
18845 +static struct kmem_cache *task_struct_cachep;
18846 +
18847 void arch_task_cache_init(void)
18848 {
18849 - task_xstate_cachep =
18850 - kmem_cache_create("task_xstate", xstate_size,
18851 + /* create a slab on which task_structs can be allocated */
18852 + task_struct_cachep =
18853 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18854 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18855 +
18856 + task_xstate_cachep =
18857 + kmem_cache_create("task_xstate", xstate_size,
18858 __alignof__(union thread_xstate),
18859 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18860 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18861 +}
18862 +
18863 +struct task_struct *alloc_task_struct(void)
18864 +{
18865 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18866 +}
18867 +
18868 +void free_task_struct(struct task_struct *task)
18869 +{
18870 + free_thread_xstate(task);
18871 + kmem_cache_free(task_struct_cachep, task);
18872 }
18873
18874 /*
18875 @@ -73,7 +90,7 @@ void exit_thread(void)
18876 unsigned long *bp = t->io_bitmap_ptr;
18877
18878 if (bp) {
18879 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18880 + struct tss_struct *tss = init_tss + get_cpu();
18881
18882 t->io_bitmap_ptr = NULL;
18883 clear_thread_flag(TIF_IO_BITMAP);
18884 @@ -93,6 +110,9 @@ void flush_thread(void)
18885
18886 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18887
18888 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18889 + loadsegment(gs, 0);
18890 +#endif
18891 tsk->thread.debugreg0 = 0;
18892 tsk->thread.debugreg1 = 0;
18893 tsk->thread.debugreg2 = 0;
18894 @@ -307,7 +327,7 @@ void default_idle(void)
18895 EXPORT_SYMBOL(default_idle);
18896 #endif
18897
18898 -void stop_this_cpu(void *dummy)
18899 +__noreturn void stop_this_cpu(void *dummy)
18900 {
18901 local_irq_disable();
18902 /*
18903 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18904 }
18905 early_param("idle", idle_setup);
18906
18907 -unsigned long arch_align_stack(unsigned long sp)
18908 +#ifdef CONFIG_PAX_RANDKSTACK
18909 +void pax_randomize_kstack(struct pt_regs *regs)
18910 {
18911 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18912 - sp -= get_random_int() % 8192;
18913 - return sp & ~0xf;
18914 -}
18915 + struct thread_struct *thread = &current->thread;
18916 + unsigned long time;
18917
18918 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18919 -{
18920 - unsigned long range_end = mm->brk + 0x02000000;
18921 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18922 + if (!randomize_va_space)
18923 + return;
18924 +
18925 + if (v8086_mode(regs))
18926 + return;
18927 +
18928 + rdtscl(time);
18929 +
18930 + /* P4 seems to return a 0 LSB, ignore it */
18931 +#ifdef CONFIG_MPENTIUM4
18932 + time &= 0x3EUL;
18933 + time <<= 2;
18934 +#elif defined(CONFIG_X86_64)
18935 + time &= 0xFUL;
18936 + time <<= 4;
18937 +#else
18938 + time &= 0x1FUL;
18939 + time <<= 3;
18940 +#endif
18941 +
18942 + thread->sp0 ^= time;
18943 + load_sp0(init_tss + smp_processor_id(), thread);
18944 +
18945 +#ifdef CONFIG_X86_64
18946 + percpu_write(kernel_stack, thread->sp0);
18947 +#endif
18948 }
18949 +#endif
18950
18951 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18952 index c40c432..6e1df72 100644
18953 --- a/arch/x86/kernel/process_32.c
18954 +++ b/arch/x86/kernel/process_32.c
18955 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18956 unsigned long thread_saved_pc(struct task_struct *tsk)
18957 {
18958 return ((unsigned long *)tsk->thread.sp)[3];
18959 +//XXX return tsk->thread.eip;
18960 }
18961
18962 #ifndef CONFIG_SMP
18963 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18964 unsigned short ss, gs;
18965 const char *board;
18966
18967 - if (user_mode_vm(regs)) {
18968 + if (user_mode(regs)) {
18969 sp = regs->sp;
18970 ss = regs->ss & 0xffff;
18971 - gs = get_user_gs(regs);
18972 } else {
18973 sp = (unsigned long) (&regs->sp);
18974 savesegment(ss, ss);
18975 - savesegment(gs, gs);
18976 }
18977 + gs = get_user_gs(regs);
18978
18979 printk("\n");
18980
18981 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18982 regs.bx = (unsigned long) fn;
18983 regs.dx = (unsigned long) arg;
18984
18985 - regs.ds = __USER_DS;
18986 - regs.es = __USER_DS;
18987 + regs.ds = __KERNEL_DS;
18988 + regs.es = __KERNEL_DS;
18989 regs.fs = __KERNEL_PERCPU;
18990 - regs.gs = __KERNEL_STACK_CANARY;
18991 + savesegment(gs, regs.gs);
18992 regs.orig_ax = -1;
18993 regs.ip = (unsigned long) kernel_thread_helper;
18994 regs.cs = __KERNEL_CS | get_kernel_rpl();
18995 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18996 struct task_struct *tsk;
18997 int err;
18998
18999 - childregs = task_pt_regs(p);
19000 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
19001 *childregs = *regs;
19002 childregs->ax = 0;
19003 childregs->sp = sp;
19004
19005 p->thread.sp = (unsigned long) childregs;
19006 p->thread.sp0 = (unsigned long) (childregs+1);
19007 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19008
19009 p->thread.ip = (unsigned long) ret_from_fork;
19010
19011 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19012 struct thread_struct *prev = &prev_p->thread,
19013 *next = &next_p->thread;
19014 int cpu = smp_processor_id();
19015 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19016 + struct tss_struct *tss = init_tss + cpu;
19017 bool preload_fpu;
19018
19019 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
19020 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19021 */
19022 lazy_save_gs(prev->gs);
19023
19024 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19025 + __set_fs(task_thread_info(next_p)->addr_limit);
19026 +#endif
19027 +
19028 /*
19029 * Load the per-thread Thread-Local Storage descriptor.
19030 */
19031 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19032 */
19033 arch_end_context_switch(next_p);
19034
19035 + percpu_write(current_task, next_p);
19036 + percpu_write(current_tinfo, &next_p->tinfo);
19037 +
19038 if (preload_fpu)
19039 __math_state_restore();
19040
19041 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19042 if (prev->gs | next->gs)
19043 lazy_load_gs(next->gs);
19044
19045 - percpu_write(current_task, next_p);
19046 -
19047 return prev_p;
19048 }
19049
19050 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19051 } while (count++ < 16);
19052 return 0;
19053 }
19054 -
19055 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19056 index 39493bc..196816d 100644
19057 --- a/arch/x86/kernel/process_64.c
19058 +++ b/arch/x86/kernel/process_64.c
19059 @@ -91,7 +91,7 @@ static void __exit_idle(void)
19060 void exit_idle(void)
19061 {
19062 /* idle loop has pid 0 */
19063 - if (current->pid)
19064 + if (task_pid_nr(current))
19065 return;
19066 __exit_idle();
19067 }
19068 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19069 if (!board)
19070 board = "";
19071 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19072 - current->pid, current->comm, print_tainted(),
19073 + task_pid_nr(current), current->comm, print_tainted(),
19074 init_utsname()->release,
19075 (int)strcspn(init_utsname()->version, " "),
19076 init_utsname()->version, board);
19077 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19078 struct pt_regs *childregs;
19079 struct task_struct *me = current;
19080
19081 - childregs = ((struct pt_regs *)
19082 - (THREAD_SIZE + task_stack_page(p))) - 1;
19083 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19084 *childregs = *regs;
19085
19086 childregs->ax = 0;
19087 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19088 p->thread.sp = (unsigned long) childregs;
19089 p->thread.sp0 = (unsigned long) (childregs+1);
19090 p->thread.usersp = me->thread.usersp;
19091 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19092
19093 set_tsk_thread_flag(p, TIF_FORK);
19094
19095 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19096 struct thread_struct *prev = &prev_p->thread;
19097 struct thread_struct *next = &next_p->thread;
19098 int cpu = smp_processor_id();
19099 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19100 + struct tss_struct *tss = init_tss + cpu;
19101 unsigned fsindex, gsindex;
19102 bool preload_fpu;
19103
19104 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19105 prev->usersp = percpu_read(old_rsp);
19106 percpu_write(old_rsp, next->usersp);
19107 percpu_write(current_task, next_p);
19108 + percpu_write(current_tinfo, &next_p->tinfo);
19109
19110 - percpu_write(kernel_stack,
19111 - (unsigned long)task_stack_page(next_p) +
19112 - THREAD_SIZE - KERNEL_STACK_OFFSET);
19113 + percpu_write(kernel_stack, next->sp0);
19114
19115 /*
19116 * Now maybe reload the debug registers and handle I/O bitmaps
19117 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19118 if (!p || p == current || p->state == TASK_RUNNING)
19119 return 0;
19120 stack = (unsigned long)task_stack_page(p);
19121 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19122 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19123 return 0;
19124 fp = *(u64 *)(p->thread.sp);
19125 do {
19126 - if (fp < (unsigned long)stack ||
19127 - fp >= (unsigned long)stack+THREAD_SIZE)
19128 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19129 return 0;
19130 ip = *(u64 *)(fp+8);
19131 if (!in_sched_functions(ip))
19132 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19133 index c06acdd..3f5fff5 100644
19134 --- a/arch/x86/kernel/ptrace.c
19135 +++ b/arch/x86/kernel/ptrace.c
19136 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19137 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19138 {
19139 int ret;
19140 - unsigned long __user *datap = (unsigned long __user *)data;
19141 + unsigned long __user *datap = (__force unsigned long __user *)data;
19142
19143 switch (request) {
19144 /* read the word at location addr in the USER area. */
19145 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19146 if (addr < 0)
19147 return -EIO;
19148 ret = do_get_thread_area(child, addr,
19149 - (struct user_desc __user *) data);
19150 + (__force struct user_desc __user *) data);
19151 break;
19152
19153 case PTRACE_SET_THREAD_AREA:
19154 if (addr < 0)
19155 return -EIO;
19156 ret = do_set_thread_area(child, addr,
19157 - (struct user_desc __user *) data, 0);
19158 + (__force struct user_desc __user *) data, 0);
19159 break;
19160 #endif
19161
19162 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19163 #ifdef CONFIG_X86_PTRACE_BTS
19164 case PTRACE_BTS_CONFIG:
19165 ret = ptrace_bts_config
19166 - (child, data, (struct ptrace_bts_config __user *)addr);
19167 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19168 break;
19169
19170 case PTRACE_BTS_STATUS:
19171 ret = ptrace_bts_status
19172 - (child, data, (struct ptrace_bts_config __user *)addr);
19173 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19174 break;
19175
19176 case PTRACE_BTS_SIZE:
19177 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19178
19179 case PTRACE_BTS_GET:
19180 ret = ptrace_bts_read_record
19181 - (child, data, (struct bts_struct __user *) addr);
19182 + (child, data, (__force struct bts_struct __user *) addr);
19183 break;
19184
19185 case PTRACE_BTS_CLEAR:
19186 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19187
19188 case PTRACE_BTS_DRAIN:
19189 ret = ptrace_bts_drain
19190 - (child, data, (struct bts_struct __user *) addr);
19191 + (child, data, (__force struct bts_struct __user *) addr);
19192 break;
19193 #endif /* CONFIG_X86_PTRACE_BTS */
19194
19195 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19196 info.si_code = si_code;
19197
19198 /* User-mode ip? */
19199 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19200 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19201
19202 /* Send us the fake SIGTRAP */
19203 force_sig_info(SIGTRAP, &info, tsk);
19204 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19205 * We must return the syscall number to actually look up in the table.
19206 * This can be -1L to skip running any syscall at all.
19207 */
19208 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
19209 +long syscall_trace_enter(struct pt_regs *regs)
19210 {
19211 long ret = 0;
19212
19213 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19214 return ret ?: regs->orig_ax;
19215 }
19216
19217 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
19218 +void syscall_trace_leave(struct pt_regs *regs)
19219 {
19220 if (unlikely(current->audit_context))
19221 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19222 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19223 index cf98100..e76e03d 100644
19224 --- a/arch/x86/kernel/reboot.c
19225 +++ b/arch/x86/kernel/reboot.c
19226 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19227 EXPORT_SYMBOL(pm_power_off);
19228
19229 static const struct desc_ptr no_idt = {};
19230 -static int reboot_mode;
19231 +static unsigned short reboot_mode;
19232 enum reboot_type reboot_type = BOOT_KBD;
19233 int reboot_force;
19234
19235 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
19236 controller to pulse the CPU reset line, which is more thorough, but
19237 doesn't work with at least one type of 486 motherboard. It is easy
19238 to stop this code working; hence the copious comments. */
19239 -static const unsigned long long
19240 -real_mode_gdt_entries [3] =
19241 +static struct desc_struct
19242 +real_mode_gdt_entries [3] __read_only =
19243 {
19244 - 0x0000000000000000ULL, /* Null descriptor */
19245 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19246 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19247 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19248 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19249 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19250 };
19251
19252 static const struct desc_ptr
19253 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19254 * specified by the code and length parameters.
19255 * We assume that length will aways be less that 100!
19256 */
19257 -void machine_real_restart(const unsigned char *code, int length)
19258 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19259 {
19260 local_irq_disable();
19261
19262 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19263 /* Remap the kernel at virtual address zero, as well as offset zero
19264 from the kernel segment. This assumes the kernel segment starts at
19265 virtual address PAGE_OFFSET. */
19266 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19267 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19268 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19269 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19270
19271 /*
19272 * Use `swapper_pg_dir' as our page directory.
19273 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19274 boot)". This seems like a fairly standard thing that gets set by
19275 REBOOT.COM programs, and the previous reset routine did this
19276 too. */
19277 - *((unsigned short *)0x472) = reboot_mode;
19278 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19279
19280 /* For the switch to real mode, copy some code to low memory. It has
19281 to be in the first 64k because it is running in 16-bit mode, and it
19282 has to have the same physical and virtual address, because it turns
19283 off paging. Copy it near the end of the first page, out of the way
19284 of BIOS variables. */
19285 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19286 - real_mode_switch, sizeof (real_mode_switch));
19287 - memcpy((void *)(0x1000 - 100), code, length);
19288 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19289 + memcpy(__va(0x1000 - 100), code, length);
19290
19291 /* Set up the IDT for real mode. */
19292 load_idt(&real_mode_idt);
19293 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19294 __asm__ __volatile__ ("ljmp $0x0008,%0"
19295 :
19296 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19297 + do { } while (1);
19298 }
19299 #ifdef CONFIG_APM_MODULE
19300 EXPORT_SYMBOL(machine_real_restart);
19301 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19302 {
19303 }
19304
19305 -static void native_machine_emergency_restart(void)
19306 +__noreturn static void native_machine_emergency_restart(void)
19307 {
19308 int i;
19309
19310 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19311 #endif
19312 }
19313
19314 -static void __machine_emergency_restart(int emergency)
19315 +static __noreturn void __machine_emergency_restart(int emergency)
19316 {
19317 reboot_emergency = emergency;
19318 machine_ops.emergency_restart();
19319 }
19320
19321 -static void native_machine_restart(char *__unused)
19322 +static __noreturn void native_machine_restart(char *__unused)
19323 {
19324 printk("machine restart\n");
19325
19326 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19327 __machine_emergency_restart(0);
19328 }
19329
19330 -static void native_machine_halt(void)
19331 +static __noreturn void native_machine_halt(void)
19332 {
19333 /* stop other cpus and apics */
19334 machine_shutdown();
19335 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19336 stop_this_cpu(NULL);
19337 }
19338
19339 -static void native_machine_power_off(void)
19340 +__noreturn static void native_machine_power_off(void)
19341 {
19342 if (pm_power_off) {
19343 if (!reboot_force)
19344 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19345 }
19346 /* a fallback in case there is no PM info available */
19347 tboot_shutdown(TB_SHUTDOWN_HALT);
19348 + do { } while (1);
19349 }
19350
19351 struct machine_ops machine_ops = {
19352 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19353 index 7a6f3b3..976a959 100644
19354 --- a/arch/x86/kernel/relocate_kernel_64.S
19355 +++ b/arch/x86/kernel/relocate_kernel_64.S
19356 @@ -11,6 +11,7 @@
19357 #include <asm/kexec.h>
19358 #include <asm/processor-flags.h>
19359 #include <asm/pgtable_types.h>
19360 +#include <asm/alternative-asm.h>
19361
19362 /*
19363 * Must be relocatable PIC code callable as a C function
19364 @@ -167,6 +168,7 @@ identity_mapped:
19365 xorq %r14, %r14
19366 xorq %r15, %r15
19367
19368 + pax_force_retaddr 0, 1
19369 ret
19370
19371 1:
19372 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19373 index 5449a26..0b6c759 100644
19374 --- a/arch/x86/kernel/setup.c
19375 +++ b/arch/x86/kernel/setup.c
19376 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19377
19378 if (!boot_params.hdr.root_flags)
19379 root_mountflags &= ~MS_RDONLY;
19380 - init_mm.start_code = (unsigned long) _text;
19381 - init_mm.end_code = (unsigned long) _etext;
19382 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19383 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19384 init_mm.end_data = (unsigned long) _edata;
19385 init_mm.brk = _brk_end;
19386
19387 - code_resource.start = virt_to_phys(_text);
19388 - code_resource.end = virt_to_phys(_etext)-1;
19389 - data_resource.start = virt_to_phys(_etext);
19390 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19391 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19392 + data_resource.start = virt_to_phys(_sdata);
19393 data_resource.end = virt_to_phys(_edata)-1;
19394 bss_resource.start = virt_to_phys(&__bss_start);
19395 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19396 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19397 index d559af9..524c6ad 100644
19398 --- a/arch/x86/kernel/setup_percpu.c
19399 +++ b/arch/x86/kernel/setup_percpu.c
19400 @@ -25,19 +25,17 @@
19401 # define DBG(x...)
19402 #endif
19403
19404 -DEFINE_PER_CPU(int, cpu_number);
19405 +#ifdef CONFIG_SMP
19406 +DEFINE_PER_CPU(unsigned int, cpu_number);
19407 EXPORT_PER_CPU_SYMBOL(cpu_number);
19408 +#endif
19409
19410 -#ifdef CONFIG_X86_64
19411 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19412 -#else
19413 -#define BOOT_PERCPU_OFFSET 0
19414 -#endif
19415
19416 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19417 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19418
19419 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19420 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19421 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19422 };
19423 EXPORT_SYMBOL(__per_cpu_offset);
19424 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19425 {
19426 #ifdef CONFIG_X86_32
19427 struct desc_struct gdt;
19428 + unsigned long base = per_cpu_offset(cpu);
19429
19430 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19431 - 0x2 | DESCTYPE_S, 0x8);
19432 - gdt.s = 1;
19433 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19434 + 0x83 | DESCTYPE_S, 0xC);
19435 write_gdt_entry(get_cpu_gdt_table(cpu),
19436 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19437 #endif
19438 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19439 /* alrighty, percpu areas up and running */
19440 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19441 for_each_possible_cpu(cpu) {
19442 +#ifdef CONFIG_CC_STACKPROTECTOR
19443 +#ifdef CONFIG_X86_32
19444 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19445 +#endif
19446 +#endif
19447 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19448 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19449 per_cpu(cpu_number, cpu) = cpu;
19450 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19451 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19452 #endif
19453 #endif
19454 +#ifdef CONFIG_CC_STACKPROTECTOR
19455 +#ifdef CONFIG_X86_32
19456 + if (!cpu)
19457 + per_cpu(stack_canary.canary, cpu) = canary;
19458 +#endif
19459 +#endif
19460 /*
19461 * Up to this point, the boot CPU has been using .data.init
19462 * area. Reload any changed state for the boot CPU.
19463 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19464 index 6a44a76..a9287a1 100644
19465 --- a/arch/x86/kernel/signal.c
19466 +++ b/arch/x86/kernel/signal.c
19467 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19468 * Align the stack pointer according to the i386 ABI,
19469 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19470 */
19471 - sp = ((sp + 4) & -16ul) - 4;
19472 + sp = ((sp - 12) & -16ul) - 4;
19473 #else /* !CONFIG_X86_32 */
19474 sp = round_down(sp, 16) - 8;
19475 #endif
19476 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19477 * Return an always-bogus address instead so we will die with SIGSEGV.
19478 */
19479 if (onsigstack && !likely(on_sig_stack(sp)))
19480 - return (void __user *)-1L;
19481 + return (__force void __user *)-1L;
19482
19483 /* save i387 state */
19484 if (used_math() && save_i387_xstate(*fpstate) < 0)
19485 - return (void __user *)-1L;
19486 + return (__force void __user *)-1L;
19487
19488 return (void __user *)sp;
19489 }
19490 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19491 }
19492
19493 if (current->mm->context.vdso)
19494 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19495 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19496 else
19497 - restorer = &frame->retcode;
19498 + restorer = (void __user *)&frame->retcode;
19499 if (ka->sa.sa_flags & SA_RESTORER)
19500 restorer = ka->sa.sa_restorer;
19501
19502 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19503 * reasons and because gdb uses it as a signature to notice
19504 * signal handler stack frames.
19505 */
19506 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19507 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19508
19509 if (err)
19510 return -EFAULT;
19511 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19512 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19513
19514 /* Set up to return from userspace. */
19515 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19516 + if (current->mm->context.vdso)
19517 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19518 + else
19519 + restorer = (void __user *)&frame->retcode;
19520 if (ka->sa.sa_flags & SA_RESTORER)
19521 restorer = ka->sa.sa_restorer;
19522 put_user_ex(restorer, &frame->pretcode);
19523 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19524 * reasons and because gdb uses it as a signature to notice
19525 * signal handler stack frames.
19526 */
19527 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19528 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19529 } put_user_catch(err);
19530
19531 if (err)
19532 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19533 int signr;
19534 sigset_t *oldset;
19535
19536 + pax_track_stack();
19537 +
19538 /*
19539 * We want the common case to go fast, which is why we may in certain
19540 * cases get here from kernel mode. Just return without doing anything
19541 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19542 * X86_32: vm86 regs switched out by assembly code before reaching
19543 * here, so testing against kernel CS suffices.
19544 */
19545 - if (!user_mode(regs))
19546 + if (!user_mode_novm(regs))
19547 return;
19548
19549 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19550 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19551 index 7e8e905..64d5c32 100644
19552 --- a/arch/x86/kernel/smpboot.c
19553 +++ b/arch/x86/kernel/smpboot.c
19554 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19555 */
19556 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19557
19558 -void cpu_hotplug_driver_lock()
19559 +void cpu_hotplug_driver_lock(void)
19560 {
19561 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19562 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19563 }
19564
19565 -void cpu_hotplug_driver_unlock()
19566 +void cpu_hotplug_driver_unlock(void)
19567 {
19568 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19569 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19570 }
19571
19572 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19573 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19574 * target processor state.
19575 */
19576 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19577 - (unsigned long)stack_start.sp);
19578 + stack_start);
19579
19580 /*
19581 * Run STARTUP IPI loop.
19582 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19583 set_idle_for_cpu(cpu, c_idle.idle);
19584 do_rest:
19585 per_cpu(current_task, cpu) = c_idle.idle;
19586 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19587 #ifdef CONFIG_X86_32
19588 /* Stack for startup_32 can be just as for start_secondary onwards */
19589 irq_ctx_init(cpu);
19590 @@ -750,13 +751,15 @@ do_rest:
19591 #else
19592 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19593 initial_gs = per_cpu_offset(cpu);
19594 - per_cpu(kernel_stack, cpu) =
19595 - (unsigned long)task_stack_page(c_idle.idle) -
19596 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19597 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19598 #endif
19599 +
19600 + pax_open_kernel();
19601 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19602 + pax_close_kernel();
19603 +
19604 initial_code = (unsigned long)start_secondary;
19605 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19606 + stack_start = c_idle.idle->thread.sp;
19607
19608 /* start_ip had better be page-aligned! */
19609 start_ip = setup_trampoline();
19610 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19611
19612 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19613
19614 +#ifdef CONFIG_PAX_PER_CPU_PGD
19615 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19616 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19617 + KERNEL_PGD_PTRS);
19618 +#endif
19619 +
19620 err = do_boot_cpu(apicid, cpu);
19621
19622 if (err) {
19623 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19624 index 3149032..14f1053 100644
19625 --- a/arch/x86/kernel/step.c
19626 +++ b/arch/x86/kernel/step.c
19627 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19628 struct desc_struct *desc;
19629 unsigned long base;
19630
19631 - seg &= ~7UL;
19632 + seg >>= 3;
19633
19634 mutex_lock(&child->mm->context.lock);
19635 - if (unlikely((seg >> 3) >= child->mm->context.size))
19636 + if (unlikely(seg >= child->mm->context.size))
19637 addr = -1L; /* bogus selector, access would fault */
19638 else {
19639 desc = child->mm->context.ldt + seg;
19640 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19641 addr += base;
19642 }
19643 mutex_unlock(&child->mm->context.lock);
19644 - }
19645 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19646 + addr = ktla_ktva(addr);
19647
19648 return addr;
19649 }
19650 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19651 unsigned char opcode[15];
19652 unsigned long addr = convert_ip_to_linear(child, regs);
19653
19654 + if (addr == -EINVAL)
19655 + return 0;
19656 +
19657 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19658 for (i = 0; i < copied; i++) {
19659 switch (opcode[i]) {
19660 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19661
19662 #ifdef CONFIG_X86_64
19663 case 0x40 ... 0x4f:
19664 - if (regs->cs != __USER_CS)
19665 + if ((regs->cs & 0xffff) != __USER_CS)
19666 /* 32-bit mode: register increment */
19667 return 0;
19668 /* 64-bit mode: REX prefix */
19669 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19670 index dee1ff7..a397f7f 100644
19671 --- a/arch/x86/kernel/sys_i386_32.c
19672 +++ b/arch/x86/kernel/sys_i386_32.c
19673 @@ -24,6 +24,21 @@
19674
19675 #include <asm/syscalls.h>
19676
19677 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19678 +{
19679 + unsigned long pax_task_size = TASK_SIZE;
19680 +
19681 +#ifdef CONFIG_PAX_SEGMEXEC
19682 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19683 + pax_task_size = SEGMEXEC_TASK_SIZE;
19684 +#endif
19685 +
19686 + if (len > pax_task_size || addr > pax_task_size - len)
19687 + return -EINVAL;
19688 +
19689 + return 0;
19690 +}
19691 +
19692 /*
19693 * Perform the select(nd, in, out, ex, tv) and mmap() system
19694 * calls. Linux/i386 didn't use to be able to handle more than
19695 @@ -58,6 +73,212 @@ out:
19696 return err;
19697 }
19698
19699 +unsigned long
19700 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19701 + unsigned long len, unsigned long pgoff, unsigned long flags)
19702 +{
19703 + struct mm_struct *mm = current->mm;
19704 + struct vm_area_struct *vma;
19705 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19706 +
19707 +#ifdef CONFIG_PAX_SEGMEXEC
19708 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19709 + pax_task_size = SEGMEXEC_TASK_SIZE;
19710 +#endif
19711 +
19712 + pax_task_size -= PAGE_SIZE;
19713 +
19714 + if (len > pax_task_size)
19715 + return -ENOMEM;
19716 +
19717 + if (flags & MAP_FIXED)
19718 + return addr;
19719 +
19720 +#ifdef CONFIG_PAX_RANDMMAP
19721 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19722 +#endif
19723 +
19724 + if (addr) {
19725 + addr = PAGE_ALIGN(addr);
19726 + if (pax_task_size - len >= addr) {
19727 + vma = find_vma(mm, addr);
19728 + if (check_heap_stack_gap(vma, addr, len))
19729 + return addr;
19730 + }
19731 + }
19732 + if (len > mm->cached_hole_size) {
19733 + start_addr = addr = mm->free_area_cache;
19734 + } else {
19735 + start_addr = addr = mm->mmap_base;
19736 + mm->cached_hole_size = 0;
19737 + }
19738 +
19739 +#ifdef CONFIG_PAX_PAGEEXEC
19740 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19741 + start_addr = 0x00110000UL;
19742 +
19743 +#ifdef CONFIG_PAX_RANDMMAP
19744 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19745 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19746 +#endif
19747 +
19748 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19749 + start_addr = addr = mm->mmap_base;
19750 + else
19751 + addr = start_addr;
19752 + }
19753 +#endif
19754 +
19755 +full_search:
19756 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19757 + /* At this point: (!vma || addr < vma->vm_end). */
19758 + if (pax_task_size - len < addr) {
19759 + /*
19760 + * Start a new search - just in case we missed
19761 + * some holes.
19762 + */
19763 + if (start_addr != mm->mmap_base) {
19764 + start_addr = addr = mm->mmap_base;
19765 + mm->cached_hole_size = 0;
19766 + goto full_search;
19767 + }
19768 + return -ENOMEM;
19769 + }
19770 + if (check_heap_stack_gap(vma, addr, len))
19771 + break;
19772 + if (addr + mm->cached_hole_size < vma->vm_start)
19773 + mm->cached_hole_size = vma->vm_start - addr;
19774 + addr = vma->vm_end;
19775 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19776 + start_addr = addr = mm->mmap_base;
19777 + mm->cached_hole_size = 0;
19778 + goto full_search;
19779 + }
19780 + }
19781 +
19782 + /*
19783 + * Remember the place where we stopped the search:
19784 + */
19785 + mm->free_area_cache = addr + len;
19786 + return addr;
19787 +}
19788 +
19789 +unsigned long
19790 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19791 + const unsigned long len, const unsigned long pgoff,
19792 + const unsigned long flags)
19793 +{
19794 + struct vm_area_struct *vma;
19795 + struct mm_struct *mm = current->mm;
19796 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19797 +
19798 +#ifdef CONFIG_PAX_SEGMEXEC
19799 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19800 + pax_task_size = SEGMEXEC_TASK_SIZE;
19801 +#endif
19802 +
19803 + pax_task_size -= PAGE_SIZE;
19804 +
19805 + /* requested length too big for entire address space */
19806 + if (len > pax_task_size)
19807 + return -ENOMEM;
19808 +
19809 + if (flags & MAP_FIXED)
19810 + return addr;
19811 +
19812 +#ifdef CONFIG_PAX_PAGEEXEC
19813 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19814 + goto bottomup;
19815 +#endif
19816 +
19817 +#ifdef CONFIG_PAX_RANDMMAP
19818 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19819 +#endif
19820 +
19821 + /* requesting a specific address */
19822 + if (addr) {
19823 + addr = PAGE_ALIGN(addr);
19824 + if (pax_task_size - len >= addr) {
19825 + vma = find_vma(mm, addr);
19826 + if (check_heap_stack_gap(vma, addr, len))
19827 + return addr;
19828 + }
19829 + }
19830 +
19831 + /* check if free_area_cache is useful for us */
19832 + if (len <= mm->cached_hole_size) {
19833 + mm->cached_hole_size = 0;
19834 + mm->free_area_cache = mm->mmap_base;
19835 + }
19836 +
19837 + /* either no address requested or can't fit in requested address hole */
19838 + addr = mm->free_area_cache;
19839 +
19840 + /* make sure it can fit in the remaining address space */
19841 + if (addr > len) {
19842 + vma = find_vma(mm, addr-len);
19843 + if (check_heap_stack_gap(vma, addr - len, len))
19844 + /* remember the address as a hint for next time */
19845 + return (mm->free_area_cache = addr-len);
19846 + }
19847 +
19848 + if (mm->mmap_base < len)
19849 + goto bottomup;
19850 +
19851 + addr = mm->mmap_base-len;
19852 +
19853 + do {
19854 + /*
19855 + * Lookup failure means no vma is above this address,
19856 + * else if new region fits below vma->vm_start,
19857 + * return with success:
19858 + */
19859 + vma = find_vma(mm, addr);
19860 + if (check_heap_stack_gap(vma, addr, len))
19861 + /* remember the address as a hint for next time */
19862 + return (mm->free_area_cache = addr);
19863 +
19864 + /* remember the largest hole we saw so far */
19865 + if (addr + mm->cached_hole_size < vma->vm_start)
19866 + mm->cached_hole_size = vma->vm_start - addr;
19867 +
19868 + /* try just below the current vma->vm_start */
19869 + addr = skip_heap_stack_gap(vma, len);
19870 + } while (!IS_ERR_VALUE(addr));
19871 +
19872 +bottomup:
19873 + /*
19874 + * A failed mmap() very likely causes application failure,
19875 + * so fall back to the bottom-up function here. This scenario
19876 + * can happen with large stack limits and large mmap()
19877 + * allocations.
19878 + */
19879 +
19880 +#ifdef CONFIG_PAX_SEGMEXEC
19881 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19882 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19883 + else
19884 +#endif
19885 +
19886 + mm->mmap_base = TASK_UNMAPPED_BASE;
19887 +
19888 +#ifdef CONFIG_PAX_RANDMMAP
19889 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19890 + mm->mmap_base += mm->delta_mmap;
19891 +#endif
19892 +
19893 + mm->free_area_cache = mm->mmap_base;
19894 + mm->cached_hole_size = ~0UL;
19895 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19896 + /*
19897 + * Restore the topdown base:
19898 + */
19899 + mm->mmap_base = base;
19900 + mm->free_area_cache = base;
19901 + mm->cached_hole_size = ~0UL;
19902 +
19903 + return addr;
19904 +}
19905
19906 struct sel_arg_struct {
19907 unsigned long n;
19908 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19909 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19910 case SEMTIMEDOP:
19911 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19912 - (const struct timespec __user *)fifth);
19913 + (__force const struct timespec __user *)fifth);
19914
19915 case SEMGET:
19916 return sys_semget(first, second, third);
19917 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19918 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19919 if (ret)
19920 return ret;
19921 - return put_user(raddr, (ulong __user *) third);
19922 + return put_user(raddr, (__force ulong __user *) third);
19923 }
19924 case 1: /* iBCS2 emulator entry point */
19925 if (!segment_eq(get_fs(), get_ds()))
19926 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19927
19928 return error;
19929 }
19930 -
19931 -
19932 -/*
19933 - * Do a system call from kernel instead of calling sys_execve so we
19934 - * end up with proper pt_regs.
19935 - */
19936 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19937 -{
19938 - long __res;
19939 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19940 - : "=a" (__res)
19941 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19942 - return __res;
19943 -}
19944 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19945 index 8aa2057..b604bc1 100644
19946 --- a/arch/x86/kernel/sys_x86_64.c
19947 +++ b/arch/x86/kernel/sys_x86_64.c
19948 @@ -32,8 +32,8 @@ out:
19949 return error;
19950 }
19951
19952 -static void find_start_end(unsigned long flags, unsigned long *begin,
19953 - unsigned long *end)
19954 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19955 + unsigned long *begin, unsigned long *end)
19956 {
19957 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19958 unsigned long new_begin;
19959 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19960 *begin = new_begin;
19961 }
19962 } else {
19963 - *begin = TASK_UNMAPPED_BASE;
19964 + *begin = mm->mmap_base;
19965 *end = TASK_SIZE;
19966 }
19967 }
19968 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19969 if (flags & MAP_FIXED)
19970 return addr;
19971
19972 - find_start_end(flags, &begin, &end);
19973 + find_start_end(mm, flags, &begin, &end);
19974
19975 if (len > end)
19976 return -ENOMEM;
19977
19978 +#ifdef CONFIG_PAX_RANDMMAP
19979 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19980 +#endif
19981 +
19982 if (addr) {
19983 addr = PAGE_ALIGN(addr);
19984 vma = find_vma(mm, addr);
19985 - if (end - len >= addr &&
19986 - (!vma || addr + len <= vma->vm_start))
19987 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19988 return addr;
19989 }
19990 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19991 @@ -106,7 +109,7 @@ full_search:
19992 }
19993 return -ENOMEM;
19994 }
19995 - if (!vma || addr + len <= vma->vm_start) {
19996 + if (check_heap_stack_gap(vma, addr, len)) {
19997 /*
19998 * Remember the place where we stopped the search:
19999 */
20000 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20001 {
20002 struct vm_area_struct *vma;
20003 struct mm_struct *mm = current->mm;
20004 - unsigned long addr = addr0;
20005 + unsigned long base = mm->mmap_base, addr = addr0;
20006
20007 /* requested length too big for entire address space */
20008 if (len > TASK_SIZE)
20009 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20010 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
20011 goto bottomup;
20012
20013 +#ifdef CONFIG_PAX_RANDMMAP
20014 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20015 +#endif
20016 +
20017 /* requesting a specific address */
20018 if (addr) {
20019 addr = PAGE_ALIGN(addr);
20020 - vma = find_vma(mm, addr);
20021 - if (TASK_SIZE - len >= addr &&
20022 - (!vma || addr + len <= vma->vm_start))
20023 - return addr;
20024 + if (TASK_SIZE - len >= addr) {
20025 + vma = find_vma(mm, addr);
20026 + if (check_heap_stack_gap(vma, addr, len))
20027 + return addr;
20028 + }
20029 }
20030
20031 /* check if free_area_cache is useful for us */
20032 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20033 /* make sure it can fit in the remaining address space */
20034 if (addr > len) {
20035 vma = find_vma(mm, addr-len);
20036 - if (!vma || addr <= vma->vm_start)
20037 + if (check_heap_stack_gap(vma, addr - len, len))
20038 /* remember the address as a hint for next time */
20039 return mm->free_area_cache = addr-len;
20040 }
20041 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20042 * return with success:
20043 */
20044 vma = find_vma(mm, addr);
20045 - if (!vma || addr+len <= vma->vm_start)
20046 + if (check_heap_stack_gap(vma, addr, len))
20047 /* remember the address as a hint for next time */
20048 return mm->free_area_cache = addr;
20049
20050 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20051 mm->cached_hole_size = vma->vm_start - addr;
20052
20053 /* try just below the current vma->vm_start */
20054 - addr = vma->vm_start-len;
20055 - } while (len < vma->vm_start);
20056 + addr = skip_heap_stack_gap(vma, len);
20057 + } while (!IS_ERR_VALUE(addr));
20058
20059 bottomup:
20060 /*
20061 @@ -198,13 +206,21 @@ bottomup:
20062 * can happen with large stack limits and large mmap()
20063 * allocations.
20064 */
20065 + mm->mmap_base = TASK_UNMAPPED_BASE;
20066 +
20067 +#ifdef CONFIG_PAX_RANDMMAP
20068 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20069 + mm->mmap_base += mm->delta_mmap;
20070 +#endif
20071 +
20072 + mm->free_area_cache = mm->mmap_base;
20073 mm->cached_hole_size = ~0UL;
20074 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20075 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20076 /*
20077 * Restore the topdown base:
20078 */
20079 - mm->free_area_cache = mm->mmap_base;
20080 + mm->mmap_base = base;
20081 + mm->free_area_cache = base;
20082 mm->cached_hole_size = ~0UL;
20083
20084 return addr;
20085 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20086 index 76d70a4..4c94a44 100644
20087 --- a/arch/x86/kernel/syscall_table_32.S
20088 +++ b/arch/x86/kernel/syscall_table_32.S
20089 @@ -1,3 +1,4 @@
20090 +.section .rodata,"a",@progbits
20091 ENTRY(sys_call_table)
20092 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20093 .long sys_exit
20094 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20095 index 46b8277..3349d55 100644
20096 --- a/arch/x86/kernel/tboot.c
20097 +++ b/arch/x86/kernel/tboot.c
20098 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20099
20100 void tboot_shutdown(u32 shutdown_type)
20101 {
20102 - void (*shutdown)(void);
20103 + void (* __noreturn shutdown)(void);
20104
20105 if (!tboot_enabled())
20106 return;
20107 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20108
20109 switch_to_tboot_pt();
20110
20111 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20112 + shutdown = (void *)tboot->shutdown_entry;
20113 shutdown();
20114
20115 /* should not reach here */
20116 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20117 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20118 }
20119
20120 -static atomic_t ap_wfs_count;
20121 +static atomic_unchecked_t ap_wfs_count;
20122
20123 static int tboot_wait_for_aps(int num_aps)
20124 {
20125 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20126 {
20127 switch (action) {
20128 case CPU_DYING:
20129 - atomic_inc(&ap_wfs_count);
20130 + atomic_inc_unchecked(&ap_wfs_count);
20131 if (num_online_cpus() == 1)
20132 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20133 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20134 return NOTIFY_BAD;
20135 break;
20136 }
20137 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20138
20139 tboot_create_trampoline();
20140
20141 - atomic_set(&ap_wfs_count, 0);
20142 + atomic_set_unchecked(&ap_wfs_count, 0);
20143 register_hotcpu_notifier(&tboot_cpu_notifier);
20144 return 0;
20145 }
20146 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20147 index be25734..87fe232 100644
20148 --- a/arch/x86/kernel/time.c
20149 +++ b/arch/x86/kernel/time.c
20150 @@ -26,17 +26,13 @@
20151 int timer_ack;
20152 #endif
20153
20154 -#ifdef CONFIG_X86_64
20155 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20156 -#endif
20157 -
20158 unsigned long profile_pc(struct pt_regs *regs)
20159 {
20160 unsigned long pc = instruction_pointer(regs);
20161
20162 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20163 + if (!user_mode(regs) && in_lock_functions(pc)) {
20164 #ifdef CONFIG_FRAME_POINTER
20165 - return *(unsigned long *)(regs->bp + sizeof(long));
20166 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20167 #else
20168 unsigned long *sp =
20169 (unsigned long *)kernel_stack_pointer(regs);
20170 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20171 * or above a saved flags. Eflags has bits 22-31 zero,
20172 * kernel addresses don't.
20173 */
20174 +
20175 +#ifdef CONFIG_PAX_KERNEXEC
20176 + return ktla_ktva(sp[0]);
20177 +#else
20178 if (sp[0] >> 22)
20179 return sp[0];
20180 if (sp[1] >> 22)
20181 return sp[1];
20182 #endif
20183 +
20184 +#endif
20185 }
20186 return pc;
20187 }
20188 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20189 index 6bb7b85..dd853e1 100644
20190 --- a/arch/x86/kernel/tls.c
20191 +++ b/arch/x86/kernel/tls.c
20192 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20193 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20194 return -EINVAL;
20195
20196 +#ifdef CONFIG_PAX_SEGMEXEC
20197 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20198 + return -EINVAL;
20199 +#endif
20200 +
20201 set_tls_desc(p, idx, &info, 1);
20202
20203 return 0;
20204 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20205 index 8508237..229b664 100644
20206 --- a/arch/x86/kernel/trampoline_32.S
20207 +++ b/arch/x86/kernel/trampoline_32.S
20208 @@ -32,6 +32,12 @@
20209 #include <asm/segment.h>
20210 #include <asm/page_types.h>
20211
20212 +#ifdef CONFIG_PAX_KERNEXEC
20213 +#define ta(X) (X)
20214 +#else
20215 +#define ta(X) ((X) - __PAGE_OFFSET)
20216 +#endif
20217 +
20218 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20219 __CPUINITRODATA
20220 .code16
20221 @@ -60,7 +66,7 @@ r_base = .
20222 inc %ax # protected mode (PE) bit
20223 lmsw %ax # into protected mode
20224 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20225 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20226 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
20227
20228 # These need to be in the same 64K segment as the above;
20229 # hence we don't use the boot_gdt_descr defined in head.S
20230 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20231 index 3af2dff..ba8aa49 100644
20232 --- a/arch/x86/kernel/trampoline_64.S
20233 +++ b/arch/x86/kernel/trampoline_64.S
20234 @@ -91,7 +91,7 @@ startup_32:
20235 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20236 movl %eax, %ds
20237
20238 - movl $X86_CR4_PAE, %eax
20239 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20240 movl %eax, %cr4 # Enable PAE mode
20241
20242 # Setup trampoline 4 level pagetables
20243 @@ -127,7 +127,7 @@ startup_64:
20244 no_longmode:
20245 hlt
20246 jmp no_longmode
20247 -#include "verify_cpu_64.S"
20248 +#include "verify_cpu.S"
20249
20250 # Careful these need to be in the same 64K segment as the above;
20251 tidt:
20252 @@ -138,7 +138,7 @@ tidt:
20253 # so the kernel can live anywhere
20254 .balign 4
20255 tgdt:
20256 - .short tgdt_end - tgdt # gdt limit
20257 + .short tgdt_end - tgdt - 1 # gdt limit
20258 .long tgdt - r_base
20259 .short 0
20260 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20261 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20262 index 7e37dce..ec3f8e5 100644
20263 --- a/arch/x86/kernel/traps.c
20264 +++ b/arch/x86/kernel/traps.c
20265 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20266
20267 /* Do we ignore FPU interrupts ? */
20268 char ignore_fpu_irq;
20269 -
20270 -/*
20271 - * The IDT has to be page-aligned to simplify the Pentium
20272 - * F0 0F bug workaround.
20273 - */
20274 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20275 #endif
20276
20277 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20278 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20279 static inline void
20280 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20281 {
20282 - if (!user_mode_vm(regs))
20283 + if (!user_mode(regs))
20284 die(str, regs, err);
20285 }
20286 #endif
20287
20288 static void __kprobes
20289 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20290 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20291 long error_code, siginfo_t *info)
20292 {
20293 struct task_struct *tsk = current;
20294
20295 #ifdef CONFIG_X86_32
20296 - if (regs->flags & X86_VM_MASK) {
20297 + if (v8086_mode(regs)) {
20298 /*
20299 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20300 * On nmi (interrupt 2), do_trap should not be called.
20301 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20302 }
20303 #endif
20304
20305 - if (!user_mode(regs))
20306 + if (!user_mode_novm(regs))
20307 goto kernel_trap;
20308
20309 #ifdef CONFIG_X86_32
20310 @@ -158,7 +152,7 @@ trap_signal:
20311 printk_ratelimit()) {
20312 printk(KERN_INFO
20313 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20314 - tsk->comm, tsk->pid, str,
20315 + tsk->comm, task_pid_nr(tsk), str,
20316 regs->ip, regs->sp, error_code);
20317 print_vma_addr(" in ", regs->ip);
20318 printk("\n");
20319 @@ -175,8 +169,20 @@ kernel_trap:
20320 if (!fixup_exception(regs)) {
20321 tsk->thread.error_code = error_code;
20322 tsk->thread.trap_no = trapnr;
20323 +
20324 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20325 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20326 + str = "PAX: suspicious stack segment fault";
20327 +#endif
20328 +
20329 die(str, regs, error_code);
20330 }
20331 +
20332 +#ifdef CONFIG_PAX_REFCOUNT
20333 + if (trapnr == 4)
20334 + pax_report_refcount_overflow(regs);
20335 +#endif
20336 +
20337 return;
20338
20339 #ifdef CONFIG_X86_32
20340 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20341 conditional_sti(regs);
20342
20343 #ifdef CONFIG_X86_32
20344 - if (regs->flags & X86_VM_MASK)
20345 + if (v8086_mode(regs))
20346 goto gp_in_vm86;
20347 #endif
20348
20349 tsk = current;
20350 - if (!user_mode(regs))
20351 + if (!user_mode_novm(regs))
20352 goto gp_in_kernel;
20353
20354 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20355 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20356 + struct mm_struct *mm = tsk->mm;
20357 + unsigned long limit;
20358 +
20359 + down_write(&mm->mmap_sem);
20360 + limit = mm->context.user_cs_limit;
20361 + if (limit < TASK_SIZE) {
20362 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20363 + up_write(&mm->mmap_sem);
20364 + return;
20365 + }
20366 + up_write(&mm->mmap_sem);
20367 + }
20368 +#endif
20369 +
20370 tsk->thread.error_code = error_code;
20371 tsk->thread.trap_no = 13;
20372
20373 @@ -305,6 +327,13 @@ gp_in_kernel:
20374 if (notify_die(DIE_GPF, "general protection fault", regs,
20375 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20376 return;
20377 +
20378 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20379 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20380 + die("PAX: suspicious general protection fault", regs, error_code);
20381 + else
20382 +#endif
20383 +
20384 die("general protection fault", regs, error_code);
20385 }
20386
20387 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20388 dotraplinkage notrace __kprobes void
20389 do_nmi(struct pt_regs *regs, long error_code)
20390 {
20391 +
20392 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20393 + if (!user_mode(regs)) {
20394 + unsigned long cs = regs->cs & 0xFFFF;
20395 + unsigned long ip = ktva_ktla(regs->ip);
20396 +
20397 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20398 + regs->ip = ip;
20399 + }
20400 +#endif
20401 +
20402 nmi_enter();
20403
20404 inc_irq_stat(__nmi_count);
20405 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20406 }
20407
20408 #ifdef CONFIG_X86_32
20409 - if (regs->flags & X86_VM_MASK)
20410 + if (v8086_mode(regs))
20411 goto debug_vm86;
20412 #endif
20413
20414 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20415 * kernel space (but re-enable TF when returning to user mode).
20416 */
20417 if (condition & DR_STEP) {
20418 - if (!user_mode(regs))
20419 + if (!user_mode_novm(regs))
20420 goto clear_TF_reenable;
20421 }
20422
20423 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20424 * Handle strange cache flush from user space exception
20425 * in all other cases. This is undocumented behaviour.
20426 */
20427 - if (regs->flags & X86_VM_MASK) {
20428 + if (v8086_mode(regs)) {
20429 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20430 return;
20431 }
20432 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20433 void __math_state_restore(void)
20434 {
20435 struct thread_info *thread = current_thread_info();
20436 - struct task_struct *tsk = thread->task;
20437 + struct task_struct *tsk = current;
20438
20439 /*
20440 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20441 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20442 */
20443 asmlinkage void math_state_restore(void)
20444 {
20445 - struct thread_info *thread = current_thread_info();
20446 - struct task_struct *tsk = thread->task;
20447 + struct task_struct *tsk = current;
20448
20449 if (!tsk_used_math(tsk)) {
20450 local_irq_enable();
20451 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20452 new file mode 100644
20453 index 0000000..50c5edd
20454 --- /dev/null
20455 +++ b/arch/x86/kernel/verify_cpu.S
20456 @@ -0,0 +1,140 @@
20457 +/*
20458 + *
20459 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20460 + * code has been borrowed from boot/setup.S and was introduced by
20461 + * Andi Kleen.
20462 + *
20463 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20464 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20465 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20466 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20467 + *
20468 + * This source code is licensed under the GNU General Public License,
20469 + * Version 2. See the file COPYING for more details.
20470 + *
20471 + * This is a common code for verification whether CPU supports
20472 + * long mode and SSE or not. It is not called directly instead this
20473 + * file is included at various places and compiled in that context.
20474 + * This file is expected to run in 32bit code. Currently:
20475 + *
20476 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20477 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20478 + * arch/x86/kernel/head_32.S: processor startup
20479 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20480 + *
20481 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20482 + * 0: Success 1: Failure
20483 + *
20484 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20485 + *
20486 + * The caller needs to check for the error code and take the action
20487 + * appropriately. Either display a message or halt.
20488 + */
20489 +
20490 +#include <asm/cpufeature.h>
20491 +#include <asm/msr-index.h>
20492 +
20493 +verify_cpu:
20494 + pushfl # Save caller passed flags
20495 + pushl $0 # Kill any dangerous flags
20496 + popfl
20497 +
20498 + pushfl # standard way to check for cpuid
20499 + popl %eax
20500 + movl %eax,%ebx
20501 + xorl $0x200000,%eax
20502 + pushl %eax
20503 + popfl
20504 + pushfl
20505 + popl %eax
20506 + cmpl %eax,%ebx
20507 + jz verify_cpu_no_longmode # cpu has no cpuid
20508 +
20509 + movl $0x0,%eax # See if cpuid 1 is implemented
20510 + cpuid
20511 + cmpl $0x1,%eax
20512 + jb verify_cpu_no_longmode # no cpuid 1
20513 +
20514 + xor %di,%di
20515 + cmpl $0x68747541,%ebx # AuthenticAMD
20516 + jnz verify_cpu_noamd
20517 + cmpl $0x69746e65,%edx
20518 + jnz verify_cpu_noamd
20519 + cmpl $0x444d4163,%ecx
20520 + jnz verify_cpu_noamd
20521 + mov $1,%di # cpu is from AMD
20522 + jmp verify_cpu_check
20523 +
20524 +verify_cpu_noamd:
20525 + cmpl $0x756e6547,%ebx # GenuineIntel?
20526 + jnz verify_cpu_check
20527 + cmpl $0x49656e69,%edx
20528 + jnz verify_cpu_check
20529 + cmpl $0x6c65746e,%ecx
20530 + jnz verify_cpu_check
20531 +
20532 + # only call IA32_MISC_ENABLE when:
20533 + # family > 6 || (family == 6 && model >= 0xd)
20534 + movl $0x1, %eax # check CPU family and model
20535 + cpuid
20536 + movl %eax, %ecx
20537 +
20538 + andl $0x0ff00f00, %eax # mask family and extended family
20539 + shrl $8, %eax
20540 + cmpl $6, %eax
20541 + ja verify_cpu_clear_xd # family > 6, ok
20542 + jb verify_cpu_check # family < 6, skip
20543 +
20544 + andl $0x000f00f0, %ecx # mask model and extended model
20545 + shrl $4, %ecx
20546 + cmpl $0xd, %ecx
20547 + jb verify_cpu_check # family == 6, model < 0xd, skip
20548 +
20549 +verify_cpu_clear_xd:
20550 + movl $MSR_IA32_MISC_ENABLE, %ecx
20551 + rdmsr
20552 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20553 + jnc verify_cpu_check # only write MSR if bit was changed
20554 + wrmsr
20555 +
20556 +verify_cpu_check:
20557 + movl $0x1,%eax # Does the cpu have what it takes
20558 + cpuid
20559 + andl $REQUIRED_MASK0,%edx
20560 + xorl $REQUIRED_MASK0,%edx
20561 + jnz verify_cpu_no_longmode
20562 +
20563 + movl $0x80000000,%eax # See if extended cpuid is implemented
20564 + cpuid
20565 + cmpl $0x80000001,%eax
20566 + jb verify_cpu_no_longmode # no extended cpuid
20567 +
20568 + movl $0x80000001,%eax # Does the cpu have what it takes
20569 + cpuid
20570 + andl $REQUIRED_MASK1,%edx
20571 + xorl $REQUIRED_MASK1,%edx
20572 + jnz verify_cpu_no_longmode
20573 +
20574 +verify_cpu_sse_test:
20575 + movl $1,%eax
20576 + cpuid
20577 + andl $SSE_MASK,%edx
20578 + cmpl $SSE_MASK,%edx
20579 + je verify_cpu_sse_ok
20580 + test %di,%di
20581 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20582 + movl $MSR_K7_HWCR,%ecx
20583 + rdmsr
20584 + btr $15,%eax # enable SSE
20585 + wrmsr
20586 + xor %di,%di # don't loop
20587 + jmp verify_cpu_sse_test # try again
20588 +
20589 +verify_cpu_no_longmode:
20590 + popfl # Restore caller passed flags
20591 + movl $1,%eax
20592 + ret
20593 +verify_cpu_sse_ok:
20594 + popfl # Restore caller passed flags
20595 + xorl %eax, %eax
20596 + ret
20597 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20598 deleted file mode 100644
20599 index 45b6f8a..0000000
20600 --- a/arch/x86/kernel/verify_cpu_64.S
20601 +++ /dev/null
20602 @@ -1,105 +0,0 @@
20603 -/*
20604 - *
20605 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20606 - * code has been borrowed from boot/setup.S and was introduced by
20607 - * Andi Kleen.
20608 - *
20609 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20610 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20611 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20612 - *
20613 - * This source code is licensed under the GNU General Public License,
20614 - * Version 2. See the file COPYING for more details.
20615 - *
20616 - * This is a common code for verification whether CPU supports
20617 - * long mode and SSE or not. It is not called directly instead this
20618 - * file is included at various places and compiled in that context.
20619 - * Following are the current usage.
20620 - *
20621 - * This file is included by both 16bit and 32bit code.
20622 - *
20623 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20624 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20625 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20626 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20627 - *
20628 - * verify_cpu, returns the status of cpu check in register %eax.
20629 - * 0: Success 1: Failure
20630 - *
20631 - * The caller needs to check for the error code and take the action
20632 - * appropriately. Either display a message or halt.
20633 - */
20634 -
20635 -#include <asm/cpufeature.h>
20636 -
20637 -verify_cpu:
20638 - pushfl # Save caller passed flags
20639 - pushl $0 # Kill any dangerous flags
20640 - popfl
20641 -
20642 - pushfl # standard way to check for cpuid
20643 - popl %eax
20644 - movl %eax,%ebx
20645 - xorl $0x200000,%eax
20646 - pushl %eax
20647 - popfl
20648 - pushfl
20649 - popl %eax
20650 - cmpl %eax,%ebx
20651 - jz verify_cpu_no_longmode # cpu has no cpuid
20652 -
20653 - movl $0x0,%eax # See if cpuid 1 is implemented
20654 - cpuid
20655 - cmpl $0x1,%eax
20656 - jb verify_cpu_no_longmode # no cpuid 1
20657 -
20658 - xor %di,%di
20659 - cmpl $0x68747541,%ebx # AuthenticAMD
20660 - jnz verify_cpu_noamd
20661 - cmpl $0x69746e65,%edx
20662 - jnz verify_cpu_noamd
20663 - cmpl $0x444d4163,%ecx
20664 - jnz verify_cpu_noamd
20665 - mov $1,%di # cpu is from AMD
20666 -
20667 -verify_cpu_noamd:
20668 - movl $0x1,%eax # Does the cpu have what it takes
20669 - cpuid
20670 - andl $REQUIRED_MASK0,%edx
20671 - xorl $REQUIRED_MASK0,%edx
20672 - jnz verify_cpu_no_longmode
20673 -
20674 - movl $0x80000000,%eax # See if extended cpuid is implemented
20675 - cpuid
20676 - cmpl $0x80000001,%eax
20677 - jb verify_cpu_no_longmode # no extended cpuid
20678 -
20679 - movl $0x80000001,%eax # Does the cpu have what it takes
20680 - cpuid
20681 - andl $REQUIRED_MASK1,%edx
20682 - xorl $REQUIRED_MASK1,%edx
20683 - jnz verify_cpu_no_longmode
20684 -
20685 -verify_cpu_sse_test:
20686 - movl $1,%eax
20687 - cpuid
20688 - andl $SSE_MASK,%edx
20689 - cmpl $SSE_MASK,%edx
20690 - je verify_cpu_sse_ok
20691 - test %di,%di
20692 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20693 - movl $0xc0010015,%ecx # HWCR
20694 - rdmsr
20695 - btr $15,%eax # enable SSE
20696 - wrmsr
20697 - xor %di,%di # don't loop
20698 - jmp verify_cpu_sse_test # try again
20699 -
20700 -verify_cpu_no_longmode:
20701 - popfl # Restore caller passed flags
20702 - movl $1,%eax
20703 - ret
20704 -verify_cpu_sse_ok:
20705 - popfl # Restore caller passed flags
20706 - xorl %eax, %eax
20707 - ret
20708 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20709 index 9c4e625..c992817 100644
20710 --- a/arch/x86/kernel/vm86_32.c
20711 +++ b/arch/x86/kernel/vm86_32.c
20712 @@ -41,6 +41,7 @@
20713 #include <linux/ptrace.h>
20714 #include <linux/audit.h>
20715 #include <linux/stddef.h>
20716 +#include <linux/grsecurity.h>
20717
20718 #include <asm/uaccess.h>
20719 #include <asm/io.h>
20720 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20721 do_exit(SIGSEGV);
20722 }
20723
20724 - tss = &per_cpu(init_tss, get_cpu());
20725 + tss = init_tss + get_cpu();
20726 current->thread.sp0 = current->thread.saved_sp0;
20727 current->thread.sysenter_cs = __KERNEL_CS;
20728 load_sp0(tss, &current->thread);
20729 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20730 struct task_struct *tsk;
20731 int tmp, ret = -EPERM;
20732
20733 +#ifdef CONFIG_GRKERNSEC_VM86
20734 + if (!capable(CAP_SYS_RAWIO)) {
20735 + gr_handle_vm86();
20736 + goto out;
20737 + }
20738 +#endif
20739 +
20740 tsk = current;
20741 if (tsk->thread.saved_sp0)
20742 goto out;
20743 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20744 int tmp, ret;
20745 struct vm86plus_struct __user *v86;
20746
20747 +#ifdef CONFIG_GRKERNSEC_VM86
20748 + if (!capable(CAP_SYS_RAWIO)) {
20749 + gr_handle_vm86();
20750 + ret = -EPERM;
20751 + goto out;
20752 + }
20753 +#endif
20754 +
20755 tsk = current;
20756 switch (regs->bx) {
20757 case VM86_REQUEST_IRQ:
20758 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20759 tsk->thread.saved_fs = info->regs32->fs;
20760 tsk->thread.saved_gs = get_user_gs(info->regs32);
20761
20762 - tss = &per_cpu(init_tss, get_cpu());
20763 + tss = init_tss + get_cpu();
20764 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20765 if (cpu_has_sep)
20766 tsk->thread.sysenter_cs = 0;
20767 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20768 goto cannot_handle;
20769 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20770 goto cannot_handle;
20771 - intr_ptr = (unsigned long __user *) (i << 2);
20772 + intr_ptr = (__force unsigned long __user *) (i << 2);
20773 if (get_user(segoffs, intr_ptr))
20774 goto cannot_handle;
20775 if ((segoffs >> 16) == BIOSSEG)
20776 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20777 index d430e4c..831f817 100644
20778 --- a/arch/x86/kernel/vmi_32.c
20779 +++ b/arch/x86/kernel/vmi_32.c
20780 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20781 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20782
20783 #define call_vrom_func(rom,func) \
20784 - (((VROMFUNC *)(rom->func))())
20785 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20786
20787 #define call_vrom_long_func(rom,func,arg) \
20788 - (((VROMLONGFUNC *)(rom->func)) (arg))
20789 +({\
20790 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20791 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20792 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20793 + __reloc;\
20794 +})
20795
20796 -static struct vrom_header *vmi_rom;
20797 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20798 static int disable_pge;
20799 static int disable_pse;
20800 static int disable_sep;
20801 @@ -76,10 +81,10 @@ static struct {
20802 void (*set_initial_ap_state)(int, int);
20803 void (*halt)(void);
20804 void (*set_lazy_mode)(int mode);
20805 -} vmi_ops;
20806 +} __no_const vmi_ops __read_only;
20807
20808 /* Cached VMI operations */
20809 -struct vmi_timer_ops vmi_timer_ops;
20810 +struct vmi_timer_ops vmi_timer_ops __read_only;
20811
20812 /*
20813 * VMI patching routines.
20814 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20815 static inline void patch_offset(void *insnbuf,
20816 unsigned long ip, unsigned long dest)
20817 {
20818 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20819 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20820 }
20821
20822 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20823 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20824 {
20825 u64 reloc;
20826 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20827 +
20828 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20829 switch(rel->type) {
20830 case VMI_RELOCATION_CALL_REL:
20831 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20832
20833 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20834 {
20835 - const pte_t pte = { .pte = 0 };
20836 + const pte_t pte = __pte(0ULL);
20837 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20838 }
20839
20840 static void vmi_pmd_clear(pmd_t *pmd)
20841 {
20842 - const pte_t pte = { .pte = 0 };
20843 + const pte_t pte = __pte(0ULL);
20844 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20845 }
20846 #endif
20847 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20848 ap.ss = __KERNEL_DS;
20849 ap.esp = (unsigned long) start_esp;
20850
20851 - ap.ds = __USER_DS;
20852 - ap.es = __USER_DS;
20853 + ap.ds = __KERNEL_DS;
20854 + ap.es = __KERNEL_DS;
20855 ap.fs = __KERNEL_PERCPU;
20856 - ap.gs = __KERNEL_STACK_CANARY;
20857 + savesegment(gs, ap.gs);
20858
20859 ap.eflags = 0;
20860
20861 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20862 paravirt_leave_lazy_mmu();
20863 }
20864
20865 +#ifdef CONFIG_PAX_KERNEXEC
20866 +static unsigned long vmi_pax_open_kernel(void)
20867 +{
20868 + return 0;
20869 +}
20870 +
20871 +static unsigned long vmi_pax_close_kernel(void)
20872 +{
20873 + return 0;
20874 +}
20875 +#endif
20876 +
20877 static inline int __init check_vmi_rom(struct vrom_header *rom)
20878 {
20879 struct pci_header *pci;
20880 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20881 return 0;
20882 if (rom->vrom_signature != VMI_SIGNATURE)
20883 return 0;
20884 + if (rom->rom_length * 512 > sizeof(*rom)) {
20885 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20886 + return 0;
20887 + }
20888 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20889 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20890 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20891 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20892 struct vrom_header *romstart;
20893 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20894 if (check_vmi_rom(romstart)) {
20895 - vmi_rom = romstart;
20896 + vmi_rom = *romstart;
20897 return 1;
20898 }
20899 }
20900 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20901
20902 para_fill(pv_irq_ops.safe_halt, Halt);
20903
20904 +#ifdef CONFIG_PAX_KERNEXEC
20905 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20906 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20907 +#endif
20908 +
20909 /*
20910 * Alternative instruction rewriting doesn't happen soon enough
20911 * to convert VMI_IRET to a call instead of a jump; so we have
20912 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20913
20914 void __init vmi_init(void)
20915 {
20916 - if (!vmi_rom)
20917 + if (!vmi_rom.rom_signature)
20918 probe_vmi_rom();
20919 else
20920 - check_vmi_rom(vmi_rom);
20921 + check_vmi_rom(&vmi_rom);
20922
20923 /* In case probing for or validating the ROM failed, basil */
20924 - if (!vmi_rom)
20925 + if (!vmi_rom.rom_signature)
20926 return;
20927
20928 - reserve_top_address(-vmi_rom->virtual_top);
20929 + reserve_top_address(-vmi_rom.virtual_top);
20930
20931 #ifdef CONFIG_X86_IO_APIC
20932 /* This is virtual hardware; timer routing is wired correctly */
20933 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20934 {
20935 unsigned long flags;
20936
20937 - if (!vmi_rom)
20938 + if (!vmi_rom.rom_signature)
20939 return;
20940
20941 local_irq_save(flags);
20942 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20943 index 3c68fe2..12c8280 100644
20944 --- a/arch/x86/kernel/vmlinux.lds.S
20945 +++ b/arch/x86/kernel/vmlinux.lds.S
20946 @@ -26,6 +26,13 @@
20947 #include <asm/page_types.h>
20948 #include <asm/cache.h>
20949 #include <asm/boot.h>
20950 +#include <asm/segment.h>
20951 +
20952 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20953 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20954 +#else
20955 +#define __KERNEL_TEXT_OFFSET 0
20956 +#endif
20957
20958 #undef i386 /* in case the preprocessor is a 32bit one */
20959
20960 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20961 #ifdef CONFIG_X86_32
20962 OUTPUT_ARCH(i386)
20963 ENTRY(phys_startup_32)
20964 -jiffies = jiffies_64;
20965 #else
20966 OUTPUT_ARCH(i386:x86-64)
20967 ENTRY(phys_startup_64)
20968 -jiffies_64 = jiffies;
20969 #endif
20970
20971 PHDRS {
20972 text PT_LOAD FLAGS(5); /* R_E */
20973 - data PT_LOAD FLAGS(7); /* RWE */
20974 +#ifdef CONFIG_X86_32
20975 + module PT_LOAD FLAGS(5); /* R_E */
20976 +#endif
20977 +#ifdef CONFIG_XEN
20978 + rodata PT_LOAD FLAGS(5); /* R_E */
20979 +#else
20980 + rodata PT_LOAD FLAGS(4); /* R__ */
20981 +#endif
20982 + data PT_LOAD FLAGS(6); /* RW_ */
20983 #ifdef CONFIG_X86_64
20984 user PT_LOAD FLAGS(5); /* R_E */
20985 +#endif
20986 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20987 #ifdef CONFIG_SMP
20988 percpu PT_LOAD FLAGS(6); /* RW_ */
20989 #endif
20990 + text.init PT_LOAD FLAGS(5); /* R_E */
20991 + text.exit PT_LOAD FLAGS(5); /* R_E */
20992 init PT_LOAD FLAGS(7); /* RWE */
20993 -#endif
20994 note PT_NOTE FLAGS(0); /* ___ */
20995 }
20996
20997 SECTIONS
20998 {
20999 #ifdef CONFIG_X86_32
21000 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21001 - phys_startup_32 = startup_32 - LOAD_OFFSET;
21002 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21003 #else
21004 - . = __START_KERNEL;
21005 - phys_startup_64 = startup_64 - LOAD_OFFSET;
21006 + . = __START_KERNEL;
21007 #endif
21008
21009 /* Text and read-only data */
21010 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
21011 - _text = .;
21012 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21013 /* bootstrapping code */
21014 +#ifdef CONFIG_X86_32
21015 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21016 +#else
21017 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21018 +#endif
21019 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21020 + _text = .;
21021 HEAD_TEXT
21022 #ifdef CONFIG_X86_32
21023 . = ALIGN(PAGE_SIZE);
21024 @@ -82,28 +102,71 @@ SECTIONS
21025 IRQENTRY_TEXT
21026 *(.fixup)
21027 *(.gnu.warning)
21028 - /* End of text section */
21029 - _etext = .;
21030 } :text = 0x9090
21031
21032 - NOTES :text :note
21033 + . += __KERNEL_TEXT_OFFSET;
21034
21035 - EXCEPTION_TABLE(16) :text = 0x9090
21036 +#ifdef CONFIG_X86_32
21037 + . = ALIGN(PAGE_SIZE);
21038 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21039 + *(.vmi.rom)
21040 + } :module
21041 +
21042 + . = ALIGN(PAGE_SIZE);
21043 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21044 +
21045 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21046 + MODULES_EXEC_VADDR = .;
21047 + BYTE(0)
21048 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21049 + . = ALIGN(HPAGE_SIZE);
21050 + MODULES_EXEC_END = . - 1;
21051 +#endif
21052 +
21053 + } :module
21054 +#endif
21055 +
21056 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21057 + /* End of text section */
21058 + _etext = . - __KERNEL_TEXT_OFFSET;
21059 + }
21060 +
21061 +#ifdef CONFIG_X86_32
21062 + . = ALIGN(PAGE_SIZE);
21063 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21064 + *(.idt)
21065 + . = ALIGN(PAGE_SIZE);
21066 + *(.empty_zero_page)
21067 + *(.swapper_pg_fixmap)
21068 + *(.swapper_pg_pmd)
21069 + *(.swapper_pg_dir)
21070 + *(.trampoline_pg_dir)
21071 + } :rodata
21072 +#endif
21073 +
21074 + . = ALIGN(PAGE_SIZE);
21075 + NOTES :rodata :note
21076 +
21077 + EXCEPTION_TABLE(16) :rodata
21078
21079 RO_DATA(PAGE_SIZE)
21080
21081 /* Data */
21082 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21083 +
21084 +#ifdef CONFIG_PAX_KERNEXEC
21085 + . = ALIGN(HPAGE_SIZE);
21086 +#else
21087 + . = ALIGN(PAGE_SIZE);
21088 +#endif
21089 +
21090 /* Start of data section */
21091 _sdata = .;
21092
21093 /* init_task */
21094 INIT_TASK_DATA(THREAD_SIZE)
21095
21096 -#ifdef CONFIG_X86_32
21097 - /* 32 bit has nosave before _edata */
21098 NOSAVE_DATA
21099 -#endif
21100
21101 PAGE_ALIGNED_DATA(PAGE_SIZE)
21102
21103 @@ -112,6 +175,8 @@ SECTIONS
21104 DATA_DATA
21105 CONSTRUCTORS
21106
21107 + jiffies = jiffies_64;
21108 +
21109 /* rarely changed data like cpu maps */
21110 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21111
21112 @@ -166,12 +231,6 @@ SECTIONS
21113 }
21114 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21115
21116 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21117 - .jiffies : AT(VLOAD(.jiffies)) {
21118 - *(.jiffies)
21119 - }
21120 - jiffies = VVIRT(.jiffies);
21121 -
21122 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21123 *(.vsyscall_3)
21124 }
21125 @@ -187,12 +246,19 @@ SECTIONS
21126 #endif /* CONFIG_X86_64 */
21127
21128 /* Init code and data - will be freed after init */
21129 - . = ALIGN(PAGE_SIZE);
21130 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21131 + BYTE(0)
21132 +
21133 +#ifdef CONFIG_PAX_KERNEXEC
21134 + . = ALIGN(HPAGE_SIZE);
21135 +#else
21136 + . = ALIGN(PAGE_SIZE);
21137 +#endif
21138 +
21139 __init_begin = .; /* paired with __init_end */
21140 - }
21141 + } :init.begin
21142
21143 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21144 +#ifdef CONFIG_SMP
21145 /*
21146 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21147 * output PHDR, so the next output section - .init.text - should
21148 @@ -201,12 +267,27 @@ SECTIONS
21149 PERCPU_VADDR(0, :percpu)
21150 #endif
21151
21152 - INIT_TEXT_SECTION(PAGE_SIZE)
21153 -#ifdef CONFIG_X86_64
21154 - :init
21155 -#endif
21156 + . = ALIGN(PAGE_SIZE);
21157 + init_begin = .;
21158 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21159 + VMLINUX_SYMBOL(_sinittext) = .;
21160 + INIT_TEXT
21161 + VMLINUX_SYMBOL(_einittext) = .;
21162 + . = ALIGN(PAGE_SIZE);
21163 + } :text.init
21164
21165 - INIT_DATA_SECTION(16)
21166 + /*
21167 + * .exit.text is discard at runtime, not link time, to deal with
21168 + * references from .altinstructions and .eh_frame
21169 + */
21170 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21171 + EXIT_TEXT
21172 + . = ALIGN(16);
21173 + } :text.exit
21174 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21175 +
21176 + . = ALIGN(PAGE_SIZE);
21177 + INIT_DATA_SECTION(16) :init
21178
21179 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21180 __x86_cpu_dev_start = .;
21181 @@ -232,19 +313,11 @@ SECTIONS
21182 *(.altinstr_replacement)
21183 }
21184
21185 - /*
21186 - * .exit.text is discard at runtime, not link time, to deal with
21187 - * references from .altinstructions and .eh_frame
21188 - */
21189 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21190 - EXIT_TEXT
21191 - }
21192 -
21193 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21194 EXIT_DATA
21195 }
21196
21197 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21198 +#ifndef CONFIG_SMP
21199 PERCPU(PAGE_SIZE)
21200 #endif
21201
21202 @@ -267,12 +340,6 @@ SECTIONS
21203 . = ALIGN(PAGE_SIZE);
21204 }
21205
21206 -#ifdef CONFIG_X86_64
21207 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21208 - NOSAVE_DATA
21209 - }
21210 -#endif
21211 -
21212 /* BSS */
21213 . = ALIGN(PAGE_SIZE);
21214 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21215 @@ -288,6 +355,7 @@ SECTIONS
21216 __brk_base = .;
21217 . += 64 * 1024; /* 64k alignment slop space */
21218 *(.brk_reservation) /* areas brk users have reserved */
21219 + . = ALIGN(HPAGE_SIZE);
21220 __brk_limit = .;
21221 }
21222
21223 @@ -316,13 +384,12 @@ SECTIONS
21224 * for the boot processor.
21225 */
21226 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21227 -INIT_PER_CPU(gdt_page);
21228 INIT_PER_CPU(irq_stack_union);
21229
21230 /*
21231 * Build-time check on the image size:
21232 */
21233 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21234 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21235 "kernel image bigger than KERNEL_IMAGE_SIZE");
21236
21237 #ifdef CONFIG_SMP
21238 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21239 index 62f39d7..3bc46a1 100644
21240 --- a/arch/x86/kernel/vsyscall_64.c
21241 +++ b/arch/x86/kernel/vsyscall_64.c
21242 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21243
21244 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21245 /* copy vsyscall data */
21246 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21247 vsyscall_gtod_data.clock.vread = clock->vread;
21248 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21249 vsyscall_gtod_data.clock.mask = clock->mask;
21250 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21251 We do this here because otherwise user space would do it on
21252 its own in a likely inferior way (no access to jiffies).
21253 If you don't like it pass NULL. */
21254 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
21255 + if (tcache && tcache->blob[0] == (j = jiffies)) {
21256 p = tcache->blob[1];
21257 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21258 /* Load per CPU data from RDTSCP */
21259 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21260 index 3909e3b..5433a97 100644
21261 --- a/arch/x86/kernel/x8664_ksyms_64.c
21262 +++ b/arch/x86/kernel/x8664_ksyms_64.c
21263 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21264
21265 EXPORT_SYMBOL(copy_user_generic);
21266 EXPORT_SYMBOL(__copy_user_nocache);
21267 -EXPORT_SYMBOL(copy_from_user);
21268 -EXPORT_SYMBOL(copy_to_user);
21269 EXPORT_SYMBOL(__copy_from_user_inatomic);
21270
21271 EXPORT_SYMBOL(copy_page);
21272 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21273 index c5ee17e..d63218f 100644
21274 --- a/arch/x86/kernel/xsave.c
21275 +++ b/arch/x86/kernel/xsave.c
21276 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21277 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21278 return -1;
21279
21280 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21281 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21282 fx_sw_user->extended_size -
21283 FP_XSTATE_MAGIC2_SIZE));
21284 /*
21285 @@ -196,7 +196,7 @@ fx_only:
21286 * the other extended state.
21287 */
21288 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21289 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21290 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21291 }
21292
21293 /*
21294 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21295 if (task_thread_info(tsk)->status & TS_XSAVE)
21296 err = restore_user_xstate(buf);
21297 else
21298 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21299 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21300 buf);
21301 if (unlikely(err)) {
21302 /*
21303 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21304 index 1350e43..a94b011 100644
21305 --- a/arch/x86/kvm/emulate.c
21306 +++ b/arch/x86/kvm/emulate.c
21307 @@ -81,8 +81,8 @@
21308 #define Src2CL (1<<29)
21309 #define Src2ImmByte (2<<29)
21310 #define Src2One (3<<29)
21311 -#define Src2Imm16 (4<<29)
21312 -#define Src2Mask (7<<29)
21313 +#define Src2Imm16 (4U<<29)
21314 +#define Src2Mask (7U<<29)
21315
21316 enum {
21317 Group1_80, Group1_81, Group1_82, Group1_83,
21318 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21319
21320 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21321 do { \
21322 + unsigned long _tmp; \
21323 __asm__ __volatile__ ( \
21324 _PRE_EFLAGS("0", "4", "2") \
21325 _op _suffix " %"_x"3,%1; " \
21326 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21327 /* Raw emulation: instruction has two explicit operands. */
21328 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21329 do { \
21330 - unsigned long _tmp; \
21331 - \
21332 switch ((_dst).bytes) { \
21333 case 2: \
21334 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21335 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21336
21337 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21338 do { \
21339 - unsigned long _tmp; \
21340 switch ((_dst).bytes) { \
21341 case 1: \
21342 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21343 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21344 index 8dfeaaa..4daa395 100644
21345 --- a/arch/x86/kvm/lapic.c
21346 +++ b/arch/x86/kvm/lapic.c
21347 @@ -52,7 +52,7 @@
21348 #define APIC_BUS_CYCLE_NS 1
21349
21350 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21351 -#define apic_debug(fmt, arg...)
21352 +#define apic_debug(fmt, arg...) do {} while (0)
21353
21354 #define APIC_LVT_NUM 6
21355 /* 14 is the version for Xeon and Pentium 8.4.8*/
21356 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21357 index 3bc2707..dd157e2 100644
21358 --- a/arch/x86/kvm/paging_tmpl.h
21359 +++ b/arch/x86/kvm/paging_tmpl.h
21360 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21361 int level = PT_PAGE_TABLE_LEVEL;
21362 unsigned long mmu_seq;
21363
21364 + pax_track_stack();
21365 +
21366 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21367 kvm_mmu_audit(vcpu, "pre page fault");
21368
21369 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21370 kvm_mmu_free_some_pages(vcpu);
21371 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21372 level, &write_pt, pfn);
21373 + (void)sptep;
21374 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21375 sptep, *sptep, write_pt);
21376
21377 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21378 index 7c6e63e..c5d92c1 100644
21379 --- a/arch/x86/kvm/svm.c
21380 +++ b/arch/x86/kvm/svm.c
21381 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21382 int cpu = raw_smp_processor_id();
21383
21384 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21385 +
21386 + pax_open_kernel();
21387 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21388 + pax_close_kernel();
21389 +
21390 load_TR_desc();
21391 }
21392
21393 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21394 return true;
21395 }
21396
21397 -static struct kvm_x86_ops svm_x86_ops = {
21398 +static const struct kvm_x86_ops svm_x86_ops = {
21399 .cpu_has_kvm_support = has_svm,
21400 .disabled_by_bios = is_disabled,
21401 .hardware_setup = svm_hardware_setup,
21402 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21403 index e6d925f..e7a4af8 100644
21404 --- a/arch/x86/kvm/vmx.c
21405 +++ b/arch/x86/kvm/vmx.c
21406 @@ -570,7 +570,11 @@ static void reload_tss(void)
21407
21408 kvm_get_gdt(&gdt);
21409 descs = (void *)gdt.base;
21410 +
21411 + pax_open_kernel();
21412 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21413 + pax_close_kernel();
21414 +
21415 load_TR_desc();
21416 }
21417
21418 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21419 if (!cpu_has_vmx_flexpriority())
21420 flexpriority_enabled = 0;
21421
21422 - if (!cpu_has_vmx_tpr_shadow())
21423 - kvm_x86_ops->update_cr8_intercept = NULL;
21424 + if (!cpu_has_vmx_tpr_shadow()) {
21425 + pax_open_kernel();
21426 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21427 + pax_close_kernel();
21428 + }
21429
21430 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21431 kvm_disable_largepages();
21432 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21433 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21434
21435 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21436 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21437 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21438 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21439 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21440 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21441 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21442 "jmp .Lkvm_vmx_return \n\t"
21443 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21444 ".Lkvm_vmx_return: "
21445 +
21446 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21447 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21448 + ".Lkvm_vmx_return2: "
21449 +#endif
21450 +
21451 /* Save guest registers, load host registers, keep flags */
21452 "xchg %0, (%%"R"sp) \n\t"
21453 "mov %%"R"ax, %c[rax](%0) \n\t"
21454 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21455 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21456 #endif
21457 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21458 +
21459 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21460 + ,[cs]"i"(__KERNEL_CS)
21461 +#endif
21462 +
21463 : "cc", "memory"
21464 - , R"bx", R"di", R"si"
21465 + , R"ax", R"bx", R"di", R"si"
21466 #ifdef CONFIG_X86_64
21467 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21468 #endif
21469 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21470 if (vmx->rmode.irq.pending)
21471 fixup_rmode_irq(vmx);
21472
21473 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21474 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21475 +
21476 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21477 + loadsegment(fs, __KERNEL_PERCPU);
21478 +#endif
21479 +
21480 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21481 + __set_fs(current_thread_info()->addr_limit);
21482 +#endif
21483 +
21484 vmx->launched = 1;
21485
21486 vmx_complete_interrupts(vmx);
21487 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21488 return false;
21489 }
21490
21491 -static struct kvm_x86_ops vmx_x86_ops = {
21492 +static const struct kvm_x86_ops vmx_x86_ops = {
21493 .cpu_has_kvm_support = cpu_has_kvm_support,
21494 .disabled_by_bios = vmx_disabled_by_bios,
21495 .hardware_setup = hardware_setup,
21496 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21497 index df1cefb..5e882ad 100644
21498 --- a/arch/x86/kvm/x86.c
21499 +++ b/arch/x86/kvm/x86.c
21500 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21501 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21502 struct kvm_cpuid_entry2 __user *entries);
21503
21504 -struct kvm_x86_ops *kvm_x86_ops;
21505 +const struct kvm_x86_ops *kvm_x86_ops;
21506 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21507
21508 int ignore_msrs = 0;
21509 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21510 struct kvm_cpuid2 *cpuid,
21511 struct kvm_cpuid_entry2 __user *entries)
21512 {
21513 - int r;
21514 + int r, i;
21515
21516 r = -E2BIG;
21517 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21518 goto out;
21519 r = -EFAULT;
21520 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21521 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21522 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21523 goto out;
21524 + for (i = 0; i < cpuid->nent; ++i) {
21525 + struct kvm_cpuid_entry2 cpuid_entry;
21526 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21527 + goto out;
21528 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21529 + }
21530 vcpu->arch.cpuid_nent = cpuid->nent;
21531 kvm_apic_set_version(vcpu);
21532 return 0;
21533 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21534 struct kvm_cpuid2 *cpuid,
21535 struct kvm_cpuid_entry2 __user *entries)
21536 {
21537 - int r;
21538 + int r, i;
21539
21540 vcpu_load(vcpu);
21541 r = -E2BIG;
21542 if (cpuid->nent < vcpu->arch.cpuid_nent)
21543 goto out;
21544 r = -EFAULT;
21545 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21546 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21547 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21548 goto out;
21549 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21550 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21551 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21552 + goto out;
21553 + }
21554 return 0;
21555
21556 out:
21557 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21558 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21559 struct kvm_interrupt *irq)
21560 {
21561 - if (irq->irq < 0 || irq->irq >= 256)
21562 + if (irq->irq >= 256)
21563 return -EINVAL;
21564 if (irqchip_in_kernel(vcpu->kvm))
21565 return -ENXIO;
21566 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21567 .notifier_call = kvmclock_cpufreq_notifier
21568 };
21569
21570 -int kvm_arch_init(void *opaque)
21571 +int kvm_arch_init(const void *opaque)
21572 {
21573 int r, cpu;
21574 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21575 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21576
21577 if (kvm_x86_ops) {
21578 printk(KERN_ERR "kvm: already loaded the other module\n");
21579 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21580 index 7e59dc1..b88c98f 100644
21581 --- a/arch/x86/lguest/boot.c
21582 +++ b/arch/x86/lguest/boot.c
21583 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21584 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21585 * Launcher to reboot us.
21586 */
21587 -static void lguest_restart(char *reason)
21588 +static __noreturn void lguest_restart(char *reason)
21589 {
21590 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21591 + BUG();
21592 }
21593
21594 /*G:050
21595 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21596 index 824fa0b..c619e96 100644
21597 --- a/arch/x86/lib/atomic64_32.c
21598 +++ b/arch/x86/lib/atomic64_32.c
21599 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21600 }
21601 EXPORT_SYMBOL(atomic64_cmpxchg);
21602
21603 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21604 +{
21605 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21606 +}
21607 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21608 +
21609 /**
21610 * atomic64_xchg - xchg atomic64 variable
21611 * @ptr: pointer to type atomic64_t
21612 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21613 EXPORT_SYMBOL(atomic64_xchg);
21614
21615 /**
21616 + * atomic64_xchg_unchecked - xchg atomic64 variable
21617 + * @ptr: pointer to type atomic64_unchecked_t
21618 + * @new_val: value to assign
21619 + *
21620 + * Atomically xchgs the value of @ptr to @new_val and returns
21621 + * the old value.
21622 + */
21623 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21624 +{
21625 + /*
21626 + * Try first with a (possibly incorrect) assumption about
21627 + * what we have there. We'll do two loops most likely,
21628 + * but we'll get an ownership MESI transaction straight away
21629 + * instead of a read transaction followed by a
21630 + * flush-for-ownership transaction:
21631 + */
21632 + u64 old_val, real_val = 0;
21633 +
21634 + do {
21635 + old_val = real_val;
21636 +
21637 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21638 +
21639 + } while (real_val != old_val);
21640 +
21641 + return old_val;
21642 +}
21643 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21644 +
21645 +/**
21646 * atomic64_set - set atomic64 variable
21647 * @ptr: pointer to type atomic64_t
21648 * @new_val: value to assign
21649 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21650 EXPORT_SYMBOL(atomic64_set);
21651
21652 /**
21653 -EXPORT_SYMBOL(atomic64_read);
21654 + * atomic64_unchecked_set - set atomic64 variable
21655 + * @ptr: pointer to type atomic64_unchecked_t
21656 + * @new_val: value to assign
21657 + *
21658 + * Atomically sets the value of @ptr to @new_val.
21659 + */
21660 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21661 +{
21662 + atomic64_xchg_unchecked(ptr, new_val);
21663 +}
21664 +EXPORT_SYMBOL(atomic64_set_unchecked);
21665 +
21666 +/**
21667 * atomic64_add_return - add and return
21668 * @delta: integer value to add
21669 * @ptr: pointer to type atomic64_t
21670 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21671 }
21672 EXPORT_SYMBOL(atomic64_add_return);
21673
21674 +/**
21675 + * atomic64_add_return_unchecked - add and return
21676 + * @delta: integer value to add
21677 + * @ptr: pointer to type atomic64_unchecked_t
21678 + *
21679 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21680 + */
21681 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21682 +{
21683 + /*
21684 + * Try first with a (possibly incorrect) assumption about
21685 + * what we have there. We'll do two loops most likely,
21686 + * but we'll get an ownership MESI transaction straight away
21687 + * instead of a read transaction followed by a
21688 + * flush-for-ownership transaction:
21689 + */
21690 + u64 old_val, new_val, real_val = 0;
21691 +
21692 + do {
21693 + old_val = real_val;
21694 + new_val = old_val + delta;
21695 +
21696 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21697 +
21698 + } while (real_val != old_val);
21699 +
21700 + return new_val;
21701 +}
21702 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21703 +
21704 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21705 {
21706 return atomic64_add_return(-delta, ptr);
21707 }
21708 EXPORT_SYMBOL(atomic64_sub_return);
21709
21710 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21711 +{
21712 + return atomic64_add_return_unchecked(-delta, ptr);
21713 +}
21714 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21715 +
21716 u64 atomic64_inc_return(atomic64_t *ptr)
21717 {
21718 return atomic64_add_return(1, ptr);
21719 }
21720 EXPORT_SYMBOL(atomic64_inc_return);
21721
21722 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21723 +{
21724 + return atomic64_add_return_unchecked(1, ptr);
21725 +}
21726 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21727 +
21728 u64 atomic64_dec_return(atomic64_t *ptr)
21729 {
21730 return atomic64_sub_return(1, ptr);
21731 }
21732 EXPORT_SYMBOL(atomic64_dec_return);
21733
21734 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21735 +{
21736 + return atomic64_sub_return_unchecked(1, ptr);
21737 +}
21738 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21739 +
21740 /**
21741 * atomic64_add - add integer to atomic64 variable
21742 * @delta: integer value to add
21743 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21744 EXPORT_SYMBOL(atomic64_add);
21745
21746 /**
21747 + * atomic64_add_unchecked - add integer to atomic64 variable
21748 + * @delta: integer value to add
21749 + * @ptr: pointer to type atomic64_unchecked_t
21750 + *
21751 + * Atomically adds @delta to @ptr.
21752 + */
21753 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21754 +{
21755 + atomic64_add_return_unchecked(delta, ptr);
21756 +}
21757 +EXPORT_SYMBOL(atomic64_add_unchecked);
21758 +
21759 +/**
21760 * atomic64_sub - subtract the atomic64 variable
21761 * @delta: integer value to subtract
21762 * @ptr: pointer to type atomic64_t
21763 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21764 EXPORT_SYMBOL(atomic64_sub);
21765
21766 /**
21767 + * atomic64_sub_unchecked - subtract the atomic64 variable
21768 + * @delta: integer value to subtract
21769 + * @ptr: pointer to type atomic64_unchecked_t
21770 + *
21771 + * Atomically subtracts @delta from @ptr.
21772 + */
21773 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21774 +{
21775 + atomic64_add_unchecked(-delta, ptr);
21776 +}
21777 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21778 +
21779 +/**
21780 * atomic64_sub_and_test - subtract value from variable and test result
21781 * @delta: integer value to subtract
21782 * @ptr: pointer to type atomic64_t
21783 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21784 EXPORT_SYMBOL(atomic64_inc);
21785
21786 /**
21787 + * atomic64_inc_unchecked - increment atomic64 variable
21788 + * @ptr: pointer to type atomic64_unchecked_t
21789 + *
21790 + * Atomically increments @ptr by 1.
21791 + */
21792 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21793 +{
21794 + atomic64_add_unchecked(1, ptr);
21795 +}
21796 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21797 +
21798 +/**
21799 * atomic64_dec - decrement atomic64 variable
21800 * @ptr: pointer to type atomic64_t
21801 *
21802 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21803 EXPORT_SYMBOL(atomic64_dec);
21804
21805 /**
21806 + * atomic64_dec_unchecked - decrement atomic64 variable
21807 + * @ptr: pointer to type atomic64_unchecked_t
21808 + *
21809 + * Atomically decrements @ptr by 1.
21810 + */
21811 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21812 +{
21813 + atomic64_sub_unchecked(1, ptr);
21814 +}
21815 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21816 +
21817 +/**
21818 * atomic64_dec_and_test - decrement and test
21819 * @ptr: pointer to type atomic64_t
21820 *
21821 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21822 index adbccd0..98f96c8 100644
21823 --- a/arch/x86/lib/checksum_32.S
21824 +++ b/arch/x86/lib/checksum_32.S
21825 @@ -28,7 +28,8 @@
21826 #include <linux/linkage.h>
21827 #include <asm/dwarf2.h>
21828 #include <asm/errno.h>
21829 -
21830 +#include <asm/segment.h>
21831 +
21832 /*
21833 * computes a partial checksum, e.g. for TCP/UDP fragments
21834 */
21835 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21836
21837 #define ARGBASE 16
21838 #define FP 12
21839 -
21840 -ENTRY(csum_partial_copy_generic)
21841 +
21842 +ENTRY(csum_partial_copy_generic_to_user)
21843 CFI_STARTPROC
21844 +
21845 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21846 + pushl %gs
21847 + CFI_ADJUST_CFA_OFFSET 4
21848 + popl %es
21849 + CFI_ADJUST_CFA_OFFSET -4
21850 + jmp csum_partial_copy_generic
21851 +#endif
21852 +
21853 +ENTRY(csum_partial_copy_generic_from_user)
21854 +
21855 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21856 + pushl %gs
21857 + CFI_ADJUST_CFA_OFFSET 4
21858 + popl %ds
21859 + CFI_ADJUST_CFA_OFFSET -4
21860 +#endif
21861 +
21862 +ENTRY(csum_partial_copy_generic)
21863 subl $4,%esp
21864 CFI_ADJUST_CFA_OFFSET 4
21865 pushl %edi
21866 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21867 jmp 4f
21868 SRC(1: movw (%esi), %bx )
21869 addl $2, %esi
21870 -DST( movw %bx, (%edi) )
21871 +DST( movw %bx, %es:(%edi) )
21872 addl $2, %edi
21873 addw %bx, %ax
21874 adcl $0, %eax
21875 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21876 SRC(1: movl (%esi), %ebx )
21877 SRC( movl 4(%esi), %edx )
21878 adcl %ebx, %eax
21879 -DST( movl %ebx, (%edi) )
21880 +DST( movl %ebx, %es:(%edi) )
21881 adcl %edx, %eax
21882 -DST( movl %edx, 4(%edi) )
21883 +DST( movl %edx, %es:4(%edi) )
21884
21885 SRC( movl 8(%esi), %ebx )
21886 SRC( movl 12(%esi), %edx )
21887 adcl %ebx, %eax
21888 -DST( movl %ebx, 8(%edi) )
21889 +DST( movl %ebx, %es:8(%edi) )
21890 adcl %edx, %eax
21891 -DST( movl %edx, 12(%edi) )
21892 +DST( movl %edx, %es:12(%edi) )
21893
21894 SRC( movl 16(%esi), %ebx )
21895 SRC( movl 20(%esi), %edx )
21896 adcl %ebx, %eax
21897 -DST( movl %ebx, 16(%edi) )
21898 +DST( movl %ebx, %es:16(%edi) )
21899 adcl %edx, %eax
21900 -DST( movl %edx, 20(%edi) )
21901 +DST( movl %edx, %es:20(%edi) )
21902
21903 SRC( movl 24(%esi), %ebx )
21904 SRC( movl 28(%esi), %edx )
21905 adcl %ebx, %eax
21906 -DST( movl %ebx, 24(%edi) )
21907 +DST( movl %ebx, %es:24(%edi) )
21908 adcl %edx, %eax
21909 -DST( movl %edx, 28(%edi) )
21910 +DST( movl %edx, %es:28(%edi) )
21911
21912 lea 32(%esi), %esi
21913 lea 32(%edi), %edi
21914 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21915 shrl $2, %edx # This clears CF
21916 SRC(3: movl (%esi), %ebx )
21917 adcl %ebx, %eax
21918 -DST( movl %ebx, (%edi) )
21919 +DST( movl %ebx, %es:(%edi) )
21920 lea 4(%esi), %esi
21921 lea 4(%edi), %edi
21922 dec %edx
21923 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21924 jb 5f
21925 SRC( movw (%esi), %cx )
21926 leal 2(%esi), %esi
21927 -DST( movw %cx, (%edi) )
21928 +DST( movw %cx, %es:(%edi) )
21929 leal 2(%edi), %edi
21930 je 6f
21931 shll $16,%ecx
21932 SRC(5: movb (%esi), %cl )
21933 -DST( movb %cl, (%edi) )
21934 +DST( movb %cl, %es:(%edi) )
21935 6: addl %ecx, %eax
21936 adcl $0, %eax
21937 7:
21938 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21939
21940 6001:
21941 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21942 - movl $-EFAULT, (%ebx)
21943 + movl $-EFAULT, %ss:(%ebx)
21944
21945 # zero the complete destination - computing the rest
21946 # is too much work
21947 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21948
21949 6002:
21950 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21951 - movl $-EFAULT,(%ebx)
21952 + movl $-EFAULT,%ss:(%ebx)
21953 jmp 5000b
21954
21955 .previous
21956
21957 + pushl %ss
21958 + CFI_ADJUST_CFA_OFFSET 4
21959 + popl %ds
21960 + CFI_ADJUST_CFA_OFFSET -4
21961 + pushl %ss
21962 + CFI_ADJUST_CFA_OFFSET 4
21963 + popl %es
21964 + CFI_ADJUST_CFA_OFFSET -4
21965 popl %ebx
21966 CFI_ADJUST_CFA_OFFSET -4
21967 CFI_RESTORE ebx
21968 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21969 CFI_ADJUST_CFA_OFFSET -4
21970 ret
21971 CFI_ENDPROC
21972 -ENDPROC(csum_partial_copy_generic)
21973 +ENDPROC(csum_partial_copy_generic_to_user)
21974
21975 #else
21976
21977 /* Version for PentiumII/PPro */
21978
21979 #define ROUND1(x) \
21980 + nop; nop; nop; \
21981 SRC(movl x(%esi), %ebx ) ; \
21982 addl %ebx, %eax ; \
21983 - DST(movl %ebx, x(%edi) ) ;
21984 + DST(movl %ebx, %es:x(%edi)) ;
21985
21986 #define ROUND(x) \
21987 + nop; nop; nop; \
21988 SRC(movl x(%esi), %ebx ) ; \
21989 adcl %ebx, %eax ; \
21990 - DST(movl %ebx, x(%edi) ) ;
21991 + DST(movl %ebx, %es:x(%edi)) ;
21992
21993 #define ARGBASE 12
21994 -
21995 -ENTRY(csum_partial_copy_generic)
21996 +
21997 +ENTRY(csum_partial_copy_generic_to_user)
21998 CFI_STARTPROC
21999 +
22000 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22001 + pushl %gs
22002 + CFI_ADJUST_CFA_OFFSET 4
22003 + popl %es
22004 + CFI_ADJUST_CFA_OFFSET -4
22005 + jmp csum_partial_copy_generic
22006 +#endif
22007 +
22008 +ENTRY(csum_partial_copy_generic_from_user)
22009 +
22010 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22011 + pushl %gs
22012 + CFI_ADJUST_CFA_OFFSET 4
22013 + popl %ds
22014 + CFI_ADJUST_CFA_OFFSET -4
22015 +#endif
22016 +
22017 +ENTRY(csum_partial_copy_generic)
22018 pushl %ebx
22019 CFI_ADJUST_CFA_OFFSET 4
22020 CFI_REL_OFFSET ebx, 0
22021 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
22022 subl %ebx, %edi
22023 lea -1(%esi),%edx
22024 andl $-32,%edx
22025 - lea 3f(%ebx,%ebx), %ebx
22026 + lea 3f(%ebx,%ebx,2), %ebx
22027 testl %esi, %esi
22028 jmp *%ebx
22029 1: addl $64,%esi
22030 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22031 jb 5f
22032 SRC( movw (%esi), %dx )
22033 leal 2(%esi), %esi
22034 -DST( movw %dx, (%edi) )
22035 +DST( movw %dx, %es:(%edi) )
22036 leal 2(%edi), %edi
22037 je 6f
22038 shll $16,%edx
22039 5:
22040 SRC( movb (%esi), %dl )
22041 -DST( movb %dl, (%edi) )
22042 +DST( movb %dl, %es:(%edi) )
22043 6: addl %edx, %eax
22044 adcl $0, %eax
22045 7:
22046 .section .fixup, "ax"
22047 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22048 - movl $-EFAULT, (%ebx)
22049 + movl $-EFAULT, %ss:(%ebx)
22050 # zero the complete destination (computing the rest is too much work)
22051 movl ARGBASE+8(%esp),%edi # dst
22052 movl ARGBASE+12(%esp),%ecx # len
22053 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22054 rep; stosb
22055 jmp 7b
22056 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22057 - movl $-EFAULT, (%ebx)
22058 + movl $-EFAULT, %ss:(%ebx)
22059 jmp 7b
22060 .previous
22061
22062 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22063 + pushl %ss
22064 + CFI_ADJUST_CFA_OFFSET 4
22065 + popl %ds
22066 + CFI_ADJUST_CFA_OFFSET -4
22067 + pushl %ss
22068 + CFI_ADJUST_CFA_OFFSET 4
22069 + popl %es
22070 + CFI_ADJUST_CFA_OFFSET -4
22071 +#endif
22072 +
22073 popl %esi
22074 CFI_ADJUST_CFA_OFFSET -4
22075 CFI_RESTORE esi
22076 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22077 CFI_RESTORE ebx
22078 ret
22079 CFI_ENDPROC
22080 -ENDPROC(csum_partial_copy_generic)
22081 +ENDPROC(csum_partial_copy_generic_to_user)
22082
22083 #undef ROUND
22084 #undef ROUND1
22085 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22086 index ebeafcc..1e3a402 100644
22087 --- a/arch/x86/lib/clear_page_64.S
22088 +++ b/arch/x86/lib/clear_page_64.S
22089 @@ -1,5 +1,6 @@
22090 #include <linux/linkage.h>
22091 #include <asm/dwarf2.h>
22092 +#include <asm/alternative-asm.h>
22093
22094 /*
22095 * Zero a page.
22096 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22097 movl $4096/8,%ecx
22098 xorl %eax,%eax
22099 rep stosq
22100 + pax_force_retaddr
22101 ret
22102 CFI_ENDPROC
22103 ENDPROC(clear_page_c)
22104 @@ -33,6 +35,7 @@ ENTRY(clear_page)
22105 leaq 64(%rdi),%rdi
22106 jnz .Lloop
22107 nop
22108 + pax_force_retaddr
22109 ret
22110 CFI_ENDPROC
22111 .Lclear_page_end:
22112 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
22113
22114 #include <asm/cpufeature.h>
22115
22116 - .section .altinstr_replacement,"ax"
22117 + .section .altinstr_replacement,"a"
22118 1: .byte 0xeb /* jmp <disp8> */
22119 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22120 2:
22121 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22122 index 727a5d4..333818a 100644
22123 --- a/arch/x86/lib/copy_page_64.S
22124 +++ b/arch/x86/lib/copy_page_64.S
22125 @@ -2,12 +2,14 @@
22126
22127 #include <linux/linkage.h>
22128 #include <asm/dwarf2.h>
22129 +#include <asm/alternative-asm.h>
22130
22131 ALIGN
22132 copy_page_c:
22133 CFI_STARTPROC
22134 movl $4096/8,%ecx
22135 rep movsq
22136 + pax_force_retaddr
22137 ret
22138 CFI_ENDPROC
22139 ENDPROC(copy_page_c)
22140 @@ -38,7 +40,7 @@ ENTRY(copy_page)
22141 movq 16 (%rsi), %rdx
22142 movq 24 (%rsi), %r8
22143 movq 32 (%rsi), %r9
22144 - movq 40 (%rsi), %r10
22145 + movq 40 (%rsi), %r13
22146 movq 48 (%rsi), %r11
22147 movq 56 (%rsi), %r12
22148
22149 @@ -49,7 +51,7 @@ ENTRY(copy_page)
22150 movq %rdx, 16 (%rdi)
22151 movq %r8, 24 (%rdi)
22152 movq %r9, 32 (%rdi)
22153 - movq %r10, 40 (%rdi)
22154 + movq %r13, 40 (%rdi)
22155 movq %r11, 48 (%rdi)
22156 movq %r12, 56 (%rdi)
22157
22158 @@ -68,7 +70,7 @@ ENTRY(copy_page)
22159 movq 16 (%rsi), %rdx
22160 movq 24 (%rsi), %r8
22161 movq 32 (%rsi), %r9
22162 - movq 40 (%rsi), %r10
22163 + movq 40 (%rsi), %r13
22164 movq 48 (%rsi), %r11
22165 movq 56 (%rsi), %r12
22166
22167 @@ -77,7 +79,7 @@ ENTRY(copy_page)
22168 movq %rdx, 16 (%rdi)
22169 movq %r8, 24 (%rdi)
22170 movq %r9, 32 (%rdi)
22171 - movq %r10, 40 (%rdi)
22172 + movq %r13, 40 (%rdi)
22173 movq %r11, 48 (%rdi)
22174 movq %r12, 56 (%rdi)
22175
22176 @@ -94,6 +96,7 @@ ENTRY(copy_page)
22177 CFI_RESTORE r13
22178 addq $3*8,%rsp
22179 CFI_ADJUST_CFA_OFFSET -3*8
22180 + pax_force_retaddr
22181 ret
22182 .Lcopy_page_end:
22183 CFI_ENDPROC
22184 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
22185
22186 #include <asm/cpufeature.h>
22187
22188 - .section .altinstr_replacement,"ax"
22189 + .section .altinstr_replacement,"a"
22190 1: .byte 0xeb /* jmp <disp8> */
22191 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22192 2:
22193 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22194 index af8debd..40c75f3 100644
22195 --- a/arch/x86/lib/copy_user_64.S
22196 +++ b/arch/x86/lib/copy_user_64.S
22197 @@ -15,13 +15,15 @@
22198 #include <asm/asm-offsets.h>
22199 #include <asm/thread_info.h>
22200 #include <asm/cpufeature.h>
22201 +#include <asm/pgtable.h>
22202 +#include <asm/alternative-asm.h>
22203
22204 .macro ALTERNATIVE_JUMP feature,orig,alt
22205 0:
22206 .byte 0xe9 /* 32bit jump */
22207 .long \orig-1f /* by default jump to orig */
22208 1:
22209 - .section .altinstr_replacement,"ax"
22210 + .section .altinstr_replacement,"a"
22211 2: .byte 0xe9 /* near jump with 32bit immediate */
22212 .long \alt-1b /* offset */ /* or alternatively to alt */
22213 .previous
22214 @@ -64,55 +66,26 @@
22215 #endif
22216 .endm
22217
22218 -/* Standard copy_to_user with segment limit checking */
22219 -ENTRY(copy_to_user)
22220 - CFI_STARTPROC
22221 - GET_THREAD_INFO(%rax)
22222 - movq %rdi,%rcx
22223 - addq %rdx,%rcx
22224 - jc bad_to_user
22225 - cmpq TI_addr_limit(%rax),%rcx
22226 - ja bad_to_user
22227 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22228 - CFI_ENDPROC
22229 -ENDPROC(copy_to_user)
22230 -
22231 -/* Standard copy_from_user with segment limit checking */
22232 -ENTRY(copy_from_user)
22233 - CFI_STARTPROC
22234 - GET_THREAD_INFO(%rax)
22235 - movq %rsi,%rcx
22236 - addq %rdx,%rcx
22237 - jc bad_from_user
22238 - cmpq TI_addr_limit(%rax),%rcx
22239 - ja bad_from_user
22240 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22241 - CFI_ENDPROC
22242 -ENDPROC(copy_from_user)
22243 -
22244 ENTRY(copy_user_generic)
22245 CFI_STARTPROC
22246 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22247 CFI_ENDPROC
22248 ENDPROC(copy_user_generic)
22249
22250 -ENTRY(__copy_from_user_inatomic)
22251 - CFI_STARTPROC
22252 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22253 - CFI_ENDPROC
22254 -ENDPROC(__copy_from_user_inatomic)
22255 -
22256 .section .fixup,"ax"
22257 /* must zero dest */
22258 ENTRY(bad_from_user)
22259 bad_from_user:
22260 CFI_STARTPROC
22261 + testl %edx,%edx
22262 + js bad_to_user
22263 movl %edx,%ecx
22264 xorl %eax,%eax
22265 rep
22266 stosb
22267 bad_to_user:
22268 movl %edx,%eax
22269 + pax_force_retaddr
22270 ret
22271 CFI_ENDPROC
22272 ENDPROC(bad_from_user)
22273 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22274 jz 17f
22275 1: movq (%rsi),%r8
22276 2: movq 1*8(%rsi),%r9
22277 -3: movq 2*8(%rsi),%r10
22278 +3: movq 2*8(%rsi),%rax
22279 4: movq 3*8(%rsi),%r11
22280 5: movq %r8,(%rdi)
22281 6: movq %r9,1*8(%rdi)
22282 -7: movq %r10,2*8(%rdi)
22283 +7: movq %rax,2*8(%rdi)
22284 8: movq %r11,3*8(%rdi)
22285 9: movq 4*8(%rsi),%r8
22286 10: movq 5*8(%rsi),%r9
22287 -11: movq 6*8(%rsi),%r10
22288 +11: movq 6*8(%rsi),%rax
22289 12: movq 7*8(%rsi),%r11
22290 13: movq %r8,4*8(%rdi)
22291 14: movq %r9,5*8(%rdi)
22292 -15: movq %r10,6*8(%rdi)
22293 +15: movq %rax,6*8(%rdi)
22294 16: movq %r11,7*8(%rdi)
22295 leaq 64(%rsi),%rsi
22296 leaq 64(%rdi),%rdi
22297 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22298 decl %ecx
22299 jnz 21b
22300 23: xor %eax,%eax
22301 + pax_force_retaddr
22302 ret
22303
22304 .section .fixup,"ax"
22305 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22306 3: rep
22307 movsb
22308 4: xorl %eax,%eax
22309 + pax_force_retaddr
22310 ret
22311
22312 .section .fixup,"ax"
22313 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22314 index cb0c112..e3a6895 100644
22315 --- a/arch/x86/lib/copy_user_nocache_64.S
22316 +++ b/arch/x86/lib/copy_user_nocache_64.S
22317 @@ -8,12 +8,14 @@
22318
22319 #include <linux/linkage.h>
22320 #include <asm/dwarf2.h>
22321 +#include <asm/alternative-asm.h>
22322
22323 #define FIX_ALIGNMENT 1
22324
22325 #include <asm/current.h>
22326 #include <asm/asm-offsets.h>
22327 #include <asm/thread_info.h>
22328 +#include <asm/pgtable.h>
22329
22330 .macro ALIGN_DESTINATION
22331 #ifdef FIX_ALIGNMENT
22332 @@ -50,6 +52,15 @@
22333 */
22334 ENTRY(__copy_user_nocache)
22335 CFI_STARTPROC
22336 +
22337 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22338 + mov $PAX_USER_SHADOW_BASE,%rcx
22339 + cmp %rcx,%rsi
22340 + jae 1f
22341 + add %rcx,%rsi
22342 +1:
22343 +#endif
22344 +
22345 cmpl $8,%edx
22346 jb 20f /* less then 8 bytes, go to byte copy loop */
22347 ALIGN_DESTINATION
22348 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22349 jz 17f
22350 1: movq (%rsi),%r8
22351 2: movq 1*8(%rsi),%r9
22352 -3: movq 2*8(%rsi),%r10
22353 +3: movq 2*8(%rsi),%rax
22354 4: movq 3*8(%rsi),%r11
22355 5: movnti %r8,(%rdi)
22356 6: movnti %r9,1*8(%rdi)
22357 -7: movnti %r10,2*8(%rdi)
22358 +7: movnti %rax,2*8(%rdi)
22359 8: movnti %r11,3*8(%rdi)
22360 9: movq 4*8(%rsi),%r8
22361 10: movq 5*8(%rsi),%r9
22362 -11: movq 6*8(%rsi),%r10
22363 +11: movq 6*8(%rsi),%rax
22364 12: movq 7*8(%rsi),%r11
22365 13: movnti %r8,4*8(%rdi)
22366 14: movnti %r9,5*8(%rdi)
22367 -15: movnti %r10,6*8(%rdi)
22368 +15: movnti %rax,6*8(%rdi)
22369 16: movnti %r11,7*8(%rdi)
22370 leaq 64(%rsi),%rsi
22371 leaq 64(%rdi),%rdi
22372 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22373 jnz 21b
22374 23: xorl %eax,%eax
22375 sfence
22376 + pax_force_retaddr
22377 ret
22378
22379 .section .fixup,"ax"
22380 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22381 index f0dba36..48cb4d6 100644
22382 --- a/arch/x86/lib/csum-copy_64.S
22383 +++ b/arch/x86/lib/csum-copy_64.S
22384 @@ -8,6 +8,7 @@
22385 #include <linux/linkage.h>
22386 #include <asm/dwarf2.h>
22387 #include <asm/errno.h>
22388 +#include <asm/alternative-asm.h>
22389
22390 /*
22391 * Checksum copy with exception handling.
22392 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22393 CFI_RESTORE rbp
22394 addq $7*8,%rsp
22395 CFI_ADJUST_CFA_OFFSET -7*8
22396 + pax_force_retaddr 0, 1
22397 ret
22398 CFI_RESTORE_STATE
22399
22400 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22401 index 459b58a..9570bc7 100644
22402 --- a/arch/x86/lib/csum-wrappers_64.c
22403 +++ b/arch/x86/lib/csum-wrappers_64.c
22404 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22405 len -= 2;
22406 }
22407 }
22408 - isum = csum_partial_copy_generic((__force const void *)src,
22409 +
22410 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22411 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22412 + src += PAX_USER_SHADOW_BASE;
22413 +#endif
22414 +
22415 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22416 dst, len, isum, errp, NULL);
22417 if (unlikely(*errp))
22418 goto out_err;
22419 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22420 }
22421
22422 *errp = 0;
22423 - return csum_partial_copy_generic(src, (void __force *)dst,
22424 +
22425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22426 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22427 + dst += PAX_USER_SHADOW_BASE;
22428 +#endif
22429 +
22430 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22431 len, isum, NULL, errp);
22432 }
22433 EXPORT_SYMBOL(csum_partial_copy_to_user);
22434 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22435 index 51f1504..ddac4c1 100644
22436 --- a/arch/x86/lib/getuser.S
22437 +++ b/arch/x86/lib/getuser.S
22438 @@ -33,15 +33,38 @@
22439 #include <asm/asm-offsets.h>
22440 #include <asm/thread_info.h>
22441 #include <asm/asm.h>
22442 +#include <asm/segment.h>
22443 +#include <asm/pgtable.h>
22444 +#include <asm/alternative-asm.h>
22445 +
22446 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22447 +#define __copyuser_seg gs;
22448 +#else
22449 +#define __copyuser_seg
22450 +#endif
22451
22452 .text
22453 ENTRY(__get_user_1)
22454 CFI_STARTPROC
22455 +
22456 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22457 GET_THREAD_INFO(%_ASM_DX)
22458 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22459 jae bad_get_user
22460 -1: movzb (%_ASM_AX),%edx
22461 +
22462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22463 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22464 + cmp %_ASM_DX,%_ASM_AX
22465 + jae 1234f
22466 + add %_ASM_DX,%_ASM_AX
22467 +1234:
22468 +#endif
22469 +
22470 +#endif
22471 +
22472 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22473 xor %eax,%eax
22474 + pax_force_retaddr
22475 ret
22476 CFI_ENDPROC
22477 ENDPROC(__get_user_1)
22478 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22479 ENTRY(__get_user_2)
22480 CFI_STARTPROC
22481 add $1,%_ASM_AX
22482 +
22483 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22484 jc bad_get_user
22485 GET_THREAD_INFO(%_ASM_DX)
22486 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22487 jae bad_get_user
22488 -2: movzwl -1(%_ASM_AX),%edx
22489 +
22490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22491 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22492 + cmp %_ASM_DX,%_ASM_AX
22493 + jae 1234f
22494 + add %_ASM_DX,%_ASM_AX
22495 +1234:
22496 +#endif
22497 +
22498 +#endif
22499 +
22500 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22501 xor %eax,%eax
22502 + pax_force_retaddr
22503 ret
22504 CFI_ENDPROC
22505 ENDPROC(__get_user_2)
22506 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22507 ENTRY(__get_user_4)
22508 CFI_STARTPROC
22509 add $3,%_ASM_AX
22510 +
22511 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22512 jc bad_get_user
22513 GET_THREAD_INFO(%_ASM_DX)
22514 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22515 jae bad_get_user
22516 -3: mov -3(%_ASM_AX),%edx
22517 +
22518 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22519 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22520 + cmp %_ASM_DX,%_ASM_AX
22521 + jae 1234f
22522 + add %_ASM_DX,%_ASM_AX
22523 +1234:
22524 +#endif
22525 +
22526 +#endif
22527 +
22528 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22529 xor %eax,%eax
22530 + pax_force_retaddr
22531 ret
22532 CFI_ENDPROC
22533 ENDPROC(__get_user_4)
22534 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22535 GET_THREAD_INFO(%_ASM_DX)
22536 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22537 jae bad_get_user
22538 +
22539 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22540 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22541 + cmp %_ASM_DX,%_ASM_AX
22542 + jae 1234f
22543 + add %_ASM_DX,%_ASM_AX
22544 +1234:
22545 +#endif
22546 +
22547 4: movq -7(%_ASM_AX),%_ASM_DX
22548 xor %eax,%eax
22549 + pax_force_retaddr
22550 ret
22551 CFI_ENDPROC
22552 ENDPROC(__get_user_8)
22553 @@ -91,6 +152,7 @@ bad_get_user:
22554 CFI_STARTPROC
22555 xor %edx,%edx
22556 mov $(-EFAULT),%_ASM_AX
22557 + pax_force_retaddr
22558 ret
22559 CFI_ENDPROC
22560 END(bad_get_user)
22561 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22562 index 05a95e7..326f2fa 100644
22563 --- a/arch/x86/lib/iomap_copy_64.S
22564 +++ b/arch/x86/lib/iomap_copy_64.S
22565 @@ -17,6 +17,7 @@
22566
22567 #include <linux/linkage.h>
22568 #include <asm/dwarf2.h>
22569 +#include <asm/alternative-asm.h>
22570
22571 /*
22572 * override generic version in lib/iomap_copy.c
22573 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22574 CFI_STARTPROC
22575 movl %edx,%ecx
22576 rep movsd
22577 + pax_force_retaddr
22578 ret
22579 CFI_ENDPROC
22580 ENDPROC(__iowrite32_copy)
22581 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22582 index ad5441e..610e351 100644
22583 --- a/arch/x86/lib/memcpy_64.S
22584 +++ b/arch/x86/lib/memcpy_64.S
22585 @@ -4,6 +4,7 @@
22586
22587 #include <asm/cpufeature.h>
22588 #include <asm/dwarf2.h>
22589 +#include <asm/alternative-asm.h>
22590
22591 /*
22592 * memcpy - Copy a memory block.
22593 @@ -34,6 +35,7 @@ memcpy_c:
22594 rep movsq
22595 movl %edx, %ecx
22596 rep movsb
22597 + pax_force_retaddr
22598 ret
22599 CFI_ENDPROC
22600 ENDPROC(memcpy_c)
22601 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22602 jnz .Lloop_1
22603
22604 .Lend:
22605 + pax_force_retaddr 0, 1
22606 ret
22607 CFI_ENDPROC
22608 ENDPROC(memcpy)
22609 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22610 * It is also a lot simpler. Use this when possible:
22611 */
22612
22613 - .section .altinstr_replacement, "ax"
22614 + .section .altinstr_replacement, "a"
22615 1: .byte 0xeb /* jmp <disp8> */
22616 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22617 2:
22618 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22619 index 2c59481..7e9ba4e 100644
22620 --- a/arch/x86/lib/memset_64.S
22621 +++ b/arch/x86/lib/memset_64.S
22622 @@ -2,6 +2,7 @@
22623
22624 #include <linux/linkage.h>
22625 #include <asm/dwarf2.h>
22626 +#include <asm/alternative-asm.h>
22627
22628 /*
22629 * ISO C memset - set a memory block to a byte value.
22630 @@ -28,6 +29,7 @@ memset_c:
22631 movl %r8d,%ecx
22632 rep stosb
22633 movq %r9,%rax
22634 + pax_force_retaddr
22635 ret
22636 CFI_ENDPROC
22637 ENDPROC(memset_c)
22638 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22639 ENTRY(memset)
22640 ENTRY(__memset)
22641 CFI_STARTPROC
22642 - movq %rdi,%r10
22643 movq %rdx,%r11
22644
22645 /* expand byte value */
22646 movzbl %sil,%ecx
22647 movabs $0x0101010101010101,%rax
22648 mul %rcx /* with rax, clobbers rdx */
22649 + movq %rdi,%rdx
22650
22651 /* align dst */
22652 movl %edi,%r9d
22653 @@ -95,7 +97,8 @@ ENTRY(__memset)
22654 jnz .Lloop_1
22655
22656 .Lende:
22657 - movq %r10,%rax
22658 + movq %rdx,%rax
22659 + pax_force_retaddr
22660 ret
22661
22662 CFI_RESTORE_STATE
22663 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22664
22665 #include <asm/cpufeature.h>
22666
22667 - .section .altinstr_replacement,"ax"
22668 + .section .altinstr_replacement,"a"
22669 1: .byte 0xeb /* jmp <disp8> */
22670 .byte (memset_c - memset) - (2f - 1b) /* offset */
22671 2:
22672 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22673 index c9f2d9b..e7fd2c0 100644
22674 --- a/arch/x86/lib/mmx_32.c
22675 +++ b/arch/x86/lib/mmx_32.c
22676 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22677 {
22678 void *p;
22679 int i;
22680 + unsigned long cr0;
22681
22682 if (unlikely(in_interrupt()))
22683 return __memcpy(to, from, len);
22684 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22685 kernel_fpu_begin();
22686
22687 __asm__ __volatile__ (
22688 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22689 - " prefetch 64(%0)\n"
22690 - " prefetch 128(%0)\n"
22691 - " prefetch 192(%0)\n"
22692 - " prefetch 256(%0)\n"
22693 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22694 + " prefetch 64(%1)\n"
22695 + " prefetch 128(%1)\n"
22696 + " prefetch 192(%1)\n"
22697 + " prefetch 256(%1)\n"
22698 "2: \n"
22699 ".section .fixup, \"ax\"\n"
22700 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22701 + "3: \n"
22702 +
22703 +#ifdef CONFIG_PAX_KERNEXEC
22704 + " movl %%cr0, %0\n"
22705 + " movl %0, %%eax\n"
22706 + " andl $0xFFFEFFFF, %%eax\n"
22707 + " movl %%eax, %%cr0\n"
22708 +#endif
22709 +
22710 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22711 +
22712 +#ifdef CONFIG_PAX_KERNEXEC
22713 + " movl %0, %%cr0\n"
22714 +#endif
22715 +
22716 " jmp 2b\n"
22717 ".previous\n"
22718 _ASM_EXTABLE(1b, 3b)
22719 - : : "r" (from));
22720 + : "=&r" (cr0) : "r" (from) : "ax");
22721
22722 for ( ; i > 5; i--) {
22723 __asm__ __volatile__ (
22724 - "1: prefetch 320(%0)\n"
22725 - "2: movq (%0), %%mm0\n"
22726 - " movq 8(%0), %%mm1\n"
22727 - " movq 16(%0), %%mm2\n"
22728 - " movq 24(%0), %%mm3\n"
22729 - " movq %%mm0, (%1)\n"
22730 - " movq %%mm1, 8(%1)\n"
22731 - " movq %%mm2, 16(%1)\n"
22732 - " movq %%mm3, 24(%1)\n"
22733 - " movq 32(%0), %%mm0\n"
22734 - " movq 40(%0), %%mm1\n"
22735 - " movq 48(%0), %%mm2\n"
22736 - " movq 56(%0), %%mm3\n"
22737 - " movq %%mm0, 32(%1)\n"
22738 - " movq %%mm1, 40(%1)\n"
22739 - " movq %%mm2, 48(%1)\n"
22740 - " movq %%mm3, 56(%1)\n"
22741 + "1: prefetch 320(%1)\n"
22742 + "2: movq (%1), %%mm0\n"
22743 + " movq 8(%1), %%mm1\n"
22744 + " movq 16(%1), %%mm2\n"
22745 + " movq 24(%1), %%mm3\n"
22746 + " movq %%mm0, (%2)\n"
22747 + " movq %%mm1, 8(%2)\n"
22748 + " movq %%mm2, 16(%2)\n"
22749 + " movq %%mm3, 24(%2)\n"
22750 + " movq 32(%1), %%mm0\n"
22751 + " movq 40(%1), %%mm1\n"
22752 + " movq 48(%1), %%mm2\n"
22753 + " movq 56(%1), %%mm3\n"
22754 + " movq %%mm0, 32(%2)\n"
22755 + " movq %%mm1, 40(%2)\n"
22756 + " movq %%mm2, 48(%2)\n"
22757 + " movq %%mm3, 56(%2)\n"
22758 ".section .fixup, \"ax\"\n"
22759 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22760 + "3:\n"
22761 +
22762 +#ifdef CONFIG_PAX_KERNEXEC
22763 + " movl %%cr0, %0\n"
22764 + " movl %0, %%eax\n"
22765 + " andl $0xFFFEFFFF, %%eax\n"
22766 + " movl %%eax, %%cr0\n"
22767 +#endif
22768 +
22769 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22770 +
22771 +#ifdef CONFIG_PAX_KERNEXEC
22772 + " movl %0, %%cr0\n"
22773 +#endif
22774 +
22775 " jmp 2b\n"
22776 ".previous\n"
22777 _ASM_EXTABLE(1b, 3b)
22778 - : : "r" (from), "r" (to) : "memory");
22779 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22780
22781 from += 64;
22782 to += 64;
22783 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22784 static void fast_copy_page(void *to, void *from)
22785 {
22786 int i;
22787 + unsigned long cr0;
22788
22789 kernel_fpu_begin();
22790
22791 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22792 * but that is for later. -AV
22793 */
22794 __asm__ __volatile__(
22795 - "1: prefetch (%0)\n"
22796 - " prefetch 64(%0)\n"
22797 - " prefetch 128(%0)\n"
22798 - " prefetch 192(%0)\n"
22799 - " prefetch 256(%0)\n"
22800 + "1: prefetch (%1)\n"
22801 + " prefetch 64(%1)\n"
22802 + " prefetch 128(%1)\n"
22803 + " prefetch 192(%1)\n"
22804 + " prefetch 256(%1)\n"
22805 "2: \n"
22806 ".section .fixup, \"ax\"\n"
22807 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22808 + "3: \n"
22809 +
22810 +#ifdef CONFIG_PAX_KERNEXEC
22811 + " movl %%cr0, %0\n"
22812 + " movl %0, %%eax\n"
22813 + " andl $0xFFFEFFFF, %%eax\n"
22814 + " movl %%eax, %%cr0\n"
22815 +#endif
22816 +
22817 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22818 +
22819 +#ifdef CONFIG_PAX_KERNEXEC
22820 + " movl %0, %%cr0\n"
22821 +#endif
22822 +
22823 " jmp 2b\n"
22824 ".previous\n"
22825 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22826 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22827
22828 for (i = 0; i < (4096-320)/64; i++) {
22829 __asm__ __volatile__ (
22830 - "1: prefetch 320(%0)\n"
22831 - "2: movq (%0), %%mm0\n"
22832 - " movntq %%mm0, (%1)\n"
22833 - " movq 8(%0), %%mm1\n"
22834 - " movntq %%mm1, 8(%1)\n"
22835 - " movq 16(%0), %%mm2\n"
22836 - " movntq %%mm2, 16(%1)\n"
22837 - " movq 24(%0), %%mm3\n"
22838 - " movntq %%mm3, 24(%1)\n"
22839 - " movq 32(%0), %%mm4\n"
22840 - " movntq %%mm4, 32(%1)\n"
22841 - " movq 40(%0), %%mm5\n"
22842 - " movntq %%mm5, 40(%1)\n"
22843 - " movq 48(%0), %%mm6\n"
22844 - " movntq %%mm6, 48(%1)\n"
22845 - " movq 56(%0), %%mm7\n"
22846 - " movntq %%mm7, 56(%1)\n"
22847 + "1: prefetch 320(%1)\n"
22848 + "2: movq (%1), %%mm0\n"
22849 + " movntq %%mm0, (%2)\n"
22850 + " movq 8(%1), %%mm1\n"
22851 + " movntq %%mm1, 8(%2)\n"
22852 + " movq 16(%1), %%mm2\n"
22853 + " movntq %%mm2, 16(%2)\n"
22854 + " movq 24(%1), %%mm3\n"
22855 + " movntq %%mm3, 24(%2)\n"
22856 + " movq 32(%1), %%mm4\n"
22857 + " movntq %%mm4, 32(%2)\n"
22858 + " movq 40(%1), %%mm5\n"
22859 + " movntq %%mm5, 40(%2)\n"
22860 + " movq 48(%1), %%mm6\n"
22861 + " movntq %%mm6, 48(%2)\n"
22862 + " movq 56(%1), %%mm7\n"
22863 + " movntq %%mm7, 56(%2)\n"
22864 ".section .fixup, \"ax\"\n"
22865 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22866 + "3:\n"
22867 +
22868 +#ifdef CONFIG_PAX_KERNEXEC
22869 + " movl %%cr0, %0\n"
22870 + " movl %0, %%eax\n"
22871 + " andl $0xFFFEFFFF, %%eax\n"
22872 + " movl %%eax, %%cr0\n"
22873 +#endif
22874 +
22875 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22876 +
22877 +#ifdef CONFIG_PAX_KERNEXEC
22878 + " movl %0, %%cr0\n"
22879 +#endif
22880 +
22881 " jmp 2b\n"
22882 ".previous\n"
22883 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22884 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22885
22886 from += 64;
22887 to += 64;
22888 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22889 static void fast_copy_page(void *to, void *from)
22890 {
22891 int i;
22892 + unsigned long cr0;
22893
22894 kernel_fpu_begin();
22895
22896 __asm__ __volatile__ (
22897 - "1: prefetch (%0)\n"
22898 - " prefetch 64(%0)\n"
22899 - " prefetch 128(%0)\n"
22900 - " prefetch 192(%0)\n"
22901 - " prefetch 256(%0)\n"
22902 + "1: prefetch (%1)\n"
22903 + " prefetch 64(%1)\n"
22904 + " prefetch 128(%1)\n"
22905 + " prefetch 192(%1)\n"
22906 + " prefetch 256(%1)\n"
22907 "2: \n"
22908 ".section .fixup, \"ax\"\n"
22909 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22910 + "3: \n"
22911 +
22912 +#ifdef CONFIG_PAX_KERNEXEC
22913 + " movl %%cr0, %0\n"
22914 + " movl %0, %%eax\n"
22915 + " andl $0xFFFEFFFF, %%eax\n"
22916 + " movl %%eax, %%cr0\n"
22917 +#endif
22918 +
22919 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22920 +
22921 +#ifdef CONFIG_PAX_KERNEXEC
22922 + " movl %0, %%cr0\n"
22923 +#endif
22924 +
22925 " jmp 2b\n"
22926 ".previous\n"
22927 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22928 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22929
22930 for (i = 0; i < 4096/64; i++) {
22931 __asm__ __volatile__ (
22932 - "1: prefetch 320(%0)\n"
22933 - "2: movq (%0), %%mm0\n"
22934 - " movq 8(%0), %%mm1\n"
22935 - " movq 16(%0), %%mm2\n"
22936 - " movq 24(%0), %%mm3\n"
22937 - " movq %%mm0, (%1)\n"
22938 - " movq %%mm1, 8(%1)\n"
22939 - " movq %%mm2, 16(%1)\n"
22940 - " movq %%mm3, 24(%1)\n"
22941 - " movq 32(%0), %%mm0\n"
22942 - " movq 40(%0), %%mm1\n"
22943 - " movq 48(%0), %%mm2\n"
22944 - " movq 56(%0), %%mm3\n"
22945 - " movq %%mm0, 32(%1)\n"
22946 - " movq %%mm1, 40(%1)\n"
22947 - " movq %%mm2, 48(%1)\n"
22948 - " movq %%mm3, 56(%1)\n"
22949 + "1: prefetch 320(%1)\n"
22950 + "2: movq (%1), %%mm0\n"
22951 + " movq 8(%1), %%mm1\n"
22952 + " movq 16(%1), %%mm2\n"
22953 + " movq 24(%1), %%mm3\n"
22954 + " movq %%mm0, (%2)\n"
22955 + " movq %%mm1, 8(%2)\n"
22956 + " movq %%mm2, 16(%2)\n"
22957 + " movq %%mm3, 24(%2)\n"
22958 + " movq 32(%1), %%mm0\n"
22959 + " movq 40(%1), %%mm1\n"
22960 + " movq 48(%1), %%mm2\n"
22961 + " movq 56(%1), %%mm3\n"
22962 + " movq %%mm0, 32(%2)\n"
22963 + " movq %%mm1, 40(%2)\n"
22964 + " movq %%mm2, 48(%2)\n"
22965 + " movq %%mm3, 56(%2)\n"
22966 ".section .fixup, \"ax\"\n"
22967 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22968 + "3:\n"
22969 +
22970 +#ifdef CONFIG_PAX_KERNEXEC
22971 + " movl %%cr0, %0\n"
22972 + " movl %0, %%eax\n"
22973 + " andl $0xFFFEFFFF, %%eax\n"
22974 + " movl %%eax, %%cr0\n"
22975 +#endif
22976 +
22977 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22978 +
22979 +#ifdef CONFIG_PAX_KERNEXEC
22980 + " movl %0, %%cr0\n"
22981 +#endif
22982 +
22983 " jmp 2b\n"
22984 ".previous\n"
22985 _ASM_EXTABLE(1b, 3b)
22986 - : : "r" (from), "r" (to) : "memory");
22987 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22988
22989 from += 64;
22990 to += 64;
22991 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22992 index 69fa106..adda88b 100644
22993 --- a/arch/x86/lib/msr-reg.S
22994 +++ b/arch/x86/lib/msr-reg.S
22995 @@ -3,6 +3,7 @@
22996 #include <asm/dwarf2.h>
22997 #include <asm/asm.h>
22998 #include <asm/msr.h>
22999 +#include <asm/alternative-asm.h>
23000
23001 #ifdef CONFIG_X86_64
23002 /*
23003 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
23004 CFI_STARTPROC
23005 pushq_cfi %rbx
23006 pushq_cfi %rbp
23007 - movq %rdi, %r10 /* Save pointer */
23008 + movq %rdi, %r9 /* Save pointer */
23009 xorl %r11d, %r11d /* Return value */
23010 movl (%rdi), %eax
23011 movl 4(%rdi), %ecx
23012 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
23013 movl 28(%rdi), %edi
23014 CFI_REMEMBER_STATE
23015 1: \op
23016 -2: movl %eax, (%r10)
23017 +2: movl %eax, (%r9)
23018 movl %r11d, %eax /* Return value */
23019 - movl %ecx, 4(%r10)
23020 - movl %edx, 8(%r10)
23021 - movl %ebx, 12(%r10)
23022 - movl %ebp, 20(%r10)
23023 - movl %esi, 24(%r10)
23024 - movl %edi, 28(%r10)
23025 + movl %ecx, 4(%r9)
23026 + movl %edx, 8(%r9)
23027 + movl %ebx, 12(%r9)
23028 + movl %ebp, 20(%r9)
23029 + movl %esi, 24(%r9)
23030 + movl %edi, 28(%r9)
23031 popq_cfi %rbp
23032 popq_cfi %rbx
23033 + pax_force_retaddr
23034 ret
23035 3:
23036 CFI_RESTORE_STATE
23037 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23038 index 36b0d15..d381858 100644
23039 --- a/arch/x86/lib/putuser.S
23040 +++ b/arch/x86/lib/putuser.S
23041 @@ -15,7 +15,9 @@
23042 #include <asm/thread_info.h>
23043 #include <asm/errno.h>
23044 #include <asm/asm.h>
23045 -
23046 +#include <asm/segment.h>
23047 +#include <asm/pgtable.h>
23048 +#include <asm/alternative-asm.h>
23049
23050 /*
23051 * __put_user_X
23052 @@ -29,52 +31,119 @@
23053 * as they get called from within inline assembly.
23054 */
23055
23056 -#define ENTER CFI_STARTPROC ; \
23057 - GET_THREAD_INFO(%_ASM_BX)
23058 -#define EXIT ret ; \
23059 +#define ENTER CFI_STARTPROC
23060 +#define EXIT pax_force_retaddr; ret ; \
23061 CFI_ENDPROC
23062
23063 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23064 +#define _DEST %_ASM_CX,%_ASM_BX
23065 +#else
23066 +#define _DEST %_ASM_CX
23067 +#endif
23068 +
23069 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23070 +#define __copyuser_seg gs;
23071 +#else
23072 +#define __copyuser_seg
23073 +#endif
23074 +
23075 .text
23076 ENTRY(__put_user_1)
23077 ENTER
23078 +
23079 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23080 + GET_THREAD_INFO(%_ASM_BX)
23081 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23082 jae bad_put_user
23083 -1: movb %al,(%_ASM_CX)
23084 +
23085 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23086 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23087 + cmp %_ASM_BX,%_ASM_CX
23088 + jb 1234f
23089 + xor %ebx,%ebx
23090 +1234:
23091 +#endif
23092 +
23093 +#endif
23094 +
23095 +1: __copyuser_seg movb %al,(_DEST)
23096 xor %eax,%eax
23097 EXIT
23098 ENDPROC(__put_user_1)
23099
23100 ENTRY(__put_user_2)
23101 ENTER
23102 +
23103 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23104 + GET_THREAD_INFO(%_ASM_BX)
23105 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23106 sub $1,%_ASM_BX
23107 cmp %_ASM_BX,%_ASM_CX
23108 jae bad_put_user
23109 -2: movw %ax,(%_ASM_CX)
23110 +
23111 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23112 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23113 + cmp %_ASM_BX,%_ASM_CX
23114 + jb 1234f
23115 + xor %ebx,%ebx
23116 +1234:
23117 +#endif
23118 +
23119 +#endif
23120 +
23121 +2: __copyuser_seg movw %ax,(_DEST)
23122 xor %eax,%eax
23123 EXIT
23124 ENDPROC(__put_user_2)
23125
23126 ENTRY(__put_user_4)
23127 ENTER
23128 +
23129 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23130 + GET_THREAD_INFO(%_ASM_BX)
23131 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23132 sub $3,%_ASM_BX
23133 cmp %_ASM_BX,%_ASM_CX
23134 jae bad_put_user
23135 -3: movl %eax,(%_ASM_CX)
23136 +
23137 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23138 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23139 + cmp %_ASM_BX,%_ASM_CX
23140 + jb 1234f
23141 + xor %ebx,%ebx
23142 +1234:
23143 +#endif
23144 +
23145 +#endif
23146 +
23147 +3: __copyuser_seg movl %eax,(_DEST)
23148 xor %eax,%eax
23149 EXIT
23150 ENDPROC(__put_user_4)
23151
23152 ENTRY(__put_user_8)
23153 ENTER
23154 +
23155 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23156 + GET_THREAD_INFO(%_ASM_BX)
23157 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23158 sub $7,%_ASM_BX
23159 cmp %_ASM_BX,%_ASM_CX
23160 jae bad_put_user
23161 -4: mov %_ASM_AX,(%_ASM_CX)
23162 +
23163 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23164 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23165 + cmp %_ASM_BX,%_ASM_CX
23166 + jb 1234f
23167 + xor %ebx,%ebx
23168 +1234:
23169 +#endif
23170 +
23171 +#endif
23172 +
23173 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
23174 #ifdef CONFIG_X86_32
23175 -5: movl %edx,4(%_ASM_CX)
23176 +5: __copyuser_seg movl %edx,4(_DEST)
23177 #endif
23178 xor %eax,%eax
23179 EXIT
23180 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23181 index 05ea55f..6345b9a 100644
23182 --- a/arch/x86/lib/rwlock_64.S
23183 +++ b/arch/x86/lib/rwlock_64.S
23184 @@ -2,6 +2,7 @@
23185
23186 #include <linux/linkage.h>
23187 #include <asm/rwlock.h>
23188 +#include <asm/asm.h>
23189 #include <asm/alternative-asm.h>
23190 #include <asm/dwarf2.h>
23191
23192 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23193 CFI_STARTPROC
23194 LOCK_PREFIX
23195 addl $RW_LOCK_BIAS,(%rdi)
23196 +
23197 +#ifdef CONFIG_PAX_REFCOUNT
23198 + jno 1234f
23199 + LOCK_PREFIX
23200 + subl $RW_LOCK_BIAS,(%rdi)
23201 + int $4
23202 +1234:
23203 + _ASM_EXTABLE(1234b, 1234b)
23204 +#endif
23205 +
23206 1: rep
23207 nop
23208 cmpl $RW_LOCK_BIAS,(%rdi)
23209 jne 1b
23210 LOCK_PREFIX
23211 subl $RW_LOCK_BIAS,(%rdi)
23212 +
23213 +#ifdef CONFIG_PAX_REFCOUNT
23214 + jno 1234f
23215 + LOCK_PREFIX
23216 + addl $RW_LOCK_BIAS,(%rdi)
23217 + int $4
23218 +1234:
23219 + _ASM_EXTABLE(1234b, 1234b)
23220 +#endif
23221 +
23222 jnz __write_lock_failed
23223 + pax_force_retaddr
23224 ret
23225 CFI_ENDPROC
23226 END(__write_lock_failed)
23227 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23228 CFI_STARTPROC
23229 LOCK_PREFIX
23230 incl (%rdi)
23231 +
23232 +#ifdef CONFIG_PAX_REFCOUNT
23233 + jno 1234f
23234 + LOCK_PREFIX
23235 + decl (%rdi)
23236 + int $4
23237 +1234:
23238 + _ASM_EXTABLE(1234b, 1234b)
23239 +#endif
23240 +
23241 1: rep
23242 nop
23243 cmpl $1,(%rdi)
23244 js 1b
23245 LOCK_PREFIX
23246 decl (%rdi)
23247 +
23248 +#ifdef CONFIG_PAX_REFCOUNT
23249 + jno 1234f
23250 + LOCK_PREFIX
23251 + incl (%rdi)
23252 + int $4
23253 +1234:
23254 + _ASM_EXTABLE(1234b, 1234b)
23255 +#endif
23256 +
23257 js __read_lock_failed
23258 + pax_force_retaddr
23259 ret
23260 CFI_ENDPROC
23261 END(__read_lock_failed)
23262 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23263 index 15acecf..f768b10 100644
23264 --- a/arch/x86/lib/rwsem_64.S
23265 +++ b/arch/x86/lib/rwsem_64.S
23266 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23267 call rwsem_down_read_failed
23268 popq %rdx
23269 restore_common_regs
23270 + pax_force_retaddr
23271 ret
23272 ENDPROC(call_rwsem_down_read_failed)
23273
23274 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23275 movq %rax,%rdi
23276 call rwsem_down_write_failed
23277 restore_common_regs
23278 + pax_force_retaddr
23279 ret
23280 ENDPROC(call_rwsem_down_write_failed)
23281
23282 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23283 movq %rax,%rdi
23284 call rwsem_wake
23285 restore_common_regs
23286 -1: ret
23287 +1: pax_force_retaddr
23288 + ret
23289 ENDPROC(call_rwsem_wake)
23290
23291 /* Fix up special calling conventions */
23292 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23293 call rwsem_downgrade_wake
23294 popq %rdx
23295 restore_common_regs
23296 + pax_force_retaddr
23297 ret
23298 ENDPROC(call_rwsem_downgrade_wake)
23299 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23300 index bf9a7d5..fb06ab5 100644
23301 --- a/arch/x86/lib/thunk_64.S
23302 +++ b/arch/x86/lib/thunk_64.S
23303 @@ -10,7 +10,8 @@
23304 #include <asm/dwarf2.h>
23305 #include <asm/calling.h>
23306 #include <asm/rwlock.h>
23307 -
23308 + #include <asm/alternative-asm.h>
23309 +
23310 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23311 .macro thunk name,func
23312 .globl \name
23313 @@ -70,6 +71,7 @@
23314 SAVE_ARGS
23315 restore:
23316 RESTORE_ARGS
23317 + pax_force_retaddr
23318 ret
23319 CFI_ENDPROC
23320
23321 @@ -77,5 +79,6 @@ restore:
23322 SAVE_ARGS
23323 restore_norax:
23324 RESTORE_ARGS 1
23325 + pax_force_retaddr
23326 ret
23327 CFI_ENDPROC
23328 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23329 index 1f118d4..ec4a953 100644
23330 --- a/arch/x86/lib/usercopy_32.c
23331 +++ b/arch/x86/lib/usercopy_32.c
23332 @@ -43,7 +43,7 @@ do { \
23333 __asm__ __volatile__( \
23334 " testl %1,%1\n" \
23335 " jz 2f\n" \
23336 - "0: lodsb\n" \
23337 + "0: "__copyuser_seg"lodsb\n" \
23338 " stosb\n" \
23339 " testb %%al,%%al\n" \
23340 " jz 1f\n" \
23341 @@ -128,10 +128,12 @@ do { \
23342 int __d0; \
23343 might_fault(); \
23344 __asm__ __volatile__( \
23345 + __COPYUSER_SET_ES \
23346 "0: rep; stosl\n" \
23347 " movl %2,%0\n" \
23348 "1: rep; stosb\n" \
23349 "2:\n" \
23350 + __COPYUSER_RESTORE_ES \
23351 ".section .fixup,\"ax\"\n" \
23352 "3: lea 0(%2,%0,4),%0\n" \
23353 " jmp 2b\n" \
23354 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23355 might_fault();
23356
23357 __asm__ __volatile__(
23358 + __COPYUSER_SET_ES
23359 " testl %0, %0\n"
23360 " jz 3f\n"
23361 " andl %0,%%ecx\n"
23362 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23363 " subl %%ecx,%0\n"
23364 " addl %0,%%eax\n"
23365 "1:\n"
23366 + __COPYUSER_RESTORE_ES
23367 ".section .fixup,\"ax\"\n"
23368 "2: xorl %%eax,%%eax\n"
23369 " jmp 1b\n"
23370 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23371
23372 #ifdef CONFIG_X86_INTEL_USERCOPY
23373 static unsigned long
23374 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23375 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23376 {
23377 int d0, d1;
23378 __asm__ __volatile__(
23379 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23380 " .align 2,0x90\n"
23381 "3: movl 0(%4), %%eax\n"
23382 "4: movl 4(%4), %%edx\n"
23383 - "5: movl %%eax, 0(%3)\n"
23384 - "6: movl %%edx, 4(%3)\n"
23385 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23386 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23387 "7: movl 8(%4), %%eax\n"
23388 "8: movl 12(%4),%%edx\n"
23389 - "9: movl %%eax, 8(%3)\n"
23390 - "10: movl %%edx, 12(%3)\n"
23391 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23392 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23393 "11: movl 16(%4), %%eax\n"
23394 "12: movl 20(%4), %%edx\n"
23395 - "13: movl %%eax, 16(%3)\n"
23396 - "14: movl %%edx, 20(%3)\n"
23397 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23398 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23399 "15: movl 24(%4), %%eax\n"
23400 "16: movl 28(%4), %%edx\n"
23401 - "17: movl %%eax, 24(%3)\n"
23402 - "18: movl %%edx, 28(%3)\n"
23403 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23404 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23405 "19: movl 32(%4), %%eax\n"
23406 "20: movl 36(%4), %%edx\n"
23407 - "21: movl %%eax, 32(%3)\n"
23408 - "22: movl %%edx, 36(%3)\n"
23409 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23410 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23411 "23: movl 40(%4), %%eax\n"
23412 "24: movl 44(%4), %%edx\n"
23413 - "25: movl %%eax, 40(%3)\n"
23414 - "26: movl %%edx, 44(%3)\n"
23415 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23416 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23417 "27: movl 48(%4), %%eax\n"
23418 "28: movl 52(%4), %%edx\n"
23419 - "29: movl %%eax, 48(%3)\n"
23420 - "30: movl %%edx, 52(%3)\n"
23421 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23422 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23423 "31: movl 56(%4), %%eax\n"
23424 "32: movl 60(%4), %%edx\n"
23425 - "33: movl %%eax, 56(%3)\n"
23426 - "34: movl %%edx, 60(%3)\n"
23427 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23428 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23429 " addl $-64, %0\n"
23430 " addl $64, %4\n"
23431 " addl $64, %3\n"
23432 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23433 " shrl $2, %0\n"
23434 " andl $3, %%eax\n"
23435 " cld\n"
23436 + __COPYUSER_SET_ES
23437 "99: rep; movsl\n"
23438 "36: movl %%eax, %0\n"
23439 "37: rep; movsb\n"
23440 "100:\n"
23441 + __COPYUSER_RESTORE_ES
23442 + ".section .fixup,\"ax\"\n"
23443 + "101: lea 0(%%eax,%0,4),%0\n"
23444 + " jmp 100b\n"
23445 + ".previous\n"
23446 + ".section __ex_table,\"a\"\n"
23447 + " .align 4\n"
23448 + " .long 1b,100b\n"
23449 + " .long 2b,100b\n"
23450 + " .long 3b,100b\n"
23451 + " .long 4b,100b\n"
23452 + " .long 5b,100b\n"
23453 + " .long 6b,100b\n"
23454 + " .long 7b,100b\n"
23455 + " .long 8b,100b\n"
23456 + " .long 9b,100b\n"
23457 + " .long 10b,100b\n"
23458 + " .long 11b,100b\n"
23459 + " .long 12b,100b\n"
23460 + " .long 13b,100b\n"
23461 + " .long 14b,100b\n"
23462 + " .long 15b,100b\n"
23463 + " .long 16b,100b\n"
23464 + " .long 17b,100b\n"
23465 + " .long 18b,100b\n"
23466 + " .long 19b,100b\n"
23467 + " .long 20b,100b\n"
23468 + " .long 21b,100b\n"
23469 + " .long 22b,100b\n"
23470 + " .long 23b,100b\n"
23471 + " .long 24b,100b\n"
23472 + " .long 25b,100b\n"
23473 + " .long 26b,100b\n"
23474 + " .long 27b,100b\n"
23475 + " .long 28b,100b\n"
23476 + " .long 29b,100b\n"
23477 + " .long 30b,100b\n"
23478 + " .long 31b,100b\n"
23479 + " .long 32b,100b\n"
23480 + " .long 33b,100b\n"
23481 + " .long 34b,100b\n"
23482 + " .long 35b,100b\n"
23483 + " .long 36b,100b\n"
23484 + " .long 37b,100b\n"
23485 + " .long 99b,101b\n"
23486 + ".previous"
23487 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23488 + : "1"(to), "2"(from), "0"(size)
23489 + : "eax", "edx", "memory");
23490 + return size;
23491 +}
23492 +
23493 +static unsigned long
23494 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23495 +{
23496 + int d0, d1;
23497 + __asm__ __volatile__(
23498 + " .align 2,0x90\n"
23499 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23500 + " cmpl $67, %0\n"
23501 + " jbe 3f\n"
23502 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23503 + " .align 2,0x90\n"
23504 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23505 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23506 + "5: movl %%eax, 0(%3)\n"
23507 + "6: movl %%edx, 4(%3)\n"
23508 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23509 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23510 + "9: movl %%eax, 8(%3)\n"
23511 + "10: movl %%edx, 12(%3)\n"
23512 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23513 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23514 + "13: movl %%eax, 16(%3)\n"
23515 + "14: movl %%edx, 20(%3)\n"
23516 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23517 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23518 + "17: movl %%eax, 24(%3)\n"
23519 + "18: movl %%edx, 28(%3)\n"
23520 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23521 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23522 + "21: movl %%eax, 32(%3)\n"
23523 + "22: movl %%edx, 36(%3)\n"
23524 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23525 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23526 + "25: movl %%eax, 40(%3)\n"
23527 + "26: movl %%edx, 44(%3)\n"
23528 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23529 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23530 + "29: movl %%eax, 48(%3)\n"
23531 + "30: movl %%edx, 52(%3)\n"
23532 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23533 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23534 + "33: movl %%eax, 56(%3)\n"
23535 + "34: movl %%edx, 60(%3)\n"
23536 + " addl $-64, %0\n"
23537 + " addl $64, %4\n"
23538 + " addl $64, %3\n"
23539 + " cmpl $63, %0\n"
23540 + " ja 1b\n"
23541 + "35: movl %0, %%eax\n"
23542 + " shrl $2, %0\n"
23543 + " andl $3, %%eax\n"
23544 + " cld\n"
23545 + "99: rep; "__copyuser_seg" movsl\n"
23546 + "36: movl %%eax, %0\n"
23547 + "37: rep; "__copyuser_seg" movsb\n"
23548 + "100:\n"
23549 ".section .fixup,\"ax\"\n"
23550 "101: lea 0(%%eax,%0,4),%0\n"
23551 " jmp 100b\n"
23552 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23553 int d0, d1;
23554 __asm__ __volatile__(
23555 " .align 2,0x90\n"
23556 - "0: movl 32(%4), %%eax\n"
23557 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23558 " cmpl $67, %0\n"
23559 " jbe 2f\n"
23560 - "1: movl 64(%4), %%eax\n"
23561 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23562 " .align 2,0x90\n"
23563 - "2: movl 0(%4), %%eax\n"
23564 - "21: movl 4(%4), %%edx\n"
23565 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23566 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23567 " movl %%eax, 0(%3)\n"
23568 " movl %%edx, 4(%3)\n"
23569 - "3: movl 8(%4), %%eax\n"
23570 - "31: movl 12(%4),%%edx\n"
23571 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23572 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23573 " movl %%eax, 8(%3)\n"
23574 " movl %%edx, 12(%3)\n"
23575 - "4: movl 16(%4), %%eax\n"
23576 - "41: movl 20(%4), %%edx\n"
23577 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23578 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23579 " movl %%eax, 16(%3)\n"
23580 " movl %%edx, 20(%3)\n"
23581 - "10: movl 24(%4), %%eax\n"
23582 - "51: movl 28(%4), %%edx\n"
23583 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23584 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23585 " movl %%eax, 24(%3)\n"
23586 " movl %%edx, 28(%3)\n"
23587 - "11: movl 32(%4), %%eax\n"
23588 - "61: movl 36(%4), %%edx\n"
23589 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23590 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23591 " movl %%eax, 32(%3)\n"
23592 " movl %%edx, 36(%3)\n"
23593 - "12: movl 40(%4), %%eax\n"
23594 - "71: movl 44(%4), %%edx\n"
23595 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23596 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23597 " movl %%eax, 40(%3)\n"
23598 " movl %%edx, 44(%3)\n"
23599 - "13: movl 48(%4), %%eax\n"
23600 - "81: movl 52(%4), %%edx\n"
23601 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23602 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23603 " movl %%eax, 48(%3)\n"
23604 " movl %%edx, 52(%3)\n"
23605 - "14: movl 56(%4), %%eax\n"
23606 - "91: movl 60(%4), %%edx\n"
23607 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23608 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23609 " movl %%eax, 56(%3)\n"
23610 " movl %%edx, 60(%3)\n"
23611 " addl $-64, %0\n"
23612 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23613 " shrl $2, %0\n"
23614 " andl $3, %%eax\n"
23615 " cld\n"
23616 - "6: rep; movsl\n"
23617 + "6: rep; "__copyuser_seg" movsl\n"
23618 " movl %%eax,%0\n"
23619 - "7: rep; movsb\n"
23620 + "7: rep; "__copyuser_seg" movsb\n"
23621 "8:\n"
23622 ".section .fixup,\"ax\"\n"
23623 "9: lea 0(%%eax,%0,4),%0\n"
23624 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23625
23626 __asm__ __volatile__(
23627 " .align 2,0x90\n"
23628 - "0: movl 32(%4), %%eax\n"
23629 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23630 " cmpl $67, %0\n"
23631 " jbe 2f\n"
23632 - "1: movl 64(%4), %%eax\n"
23633 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23634 " .align 2,0x90\n"
23635 - "2: movl 0(%4), %%eax\n"
23636 - "21: movl 4(%4), %%edx\n"
23637 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23638 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23639 " movnti %%eax, 0(%3)\n"
23640 " movnti %%edx, 4(%3)\n"
23641 - "3: movl 8(%4), %%eax\n"
23642 - "31: movl 12(%4),%%edx\n"
23643 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23644 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23645 " movnti %%eax, 8(%3)\n"
23646 " movnti %%edx, 12(%3)\n"
23647 - "4: movl 16(%4), %%eax\n"
23648 - "41: movl 20(%4), %%edx\n"
23649 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23650 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23651 " movnti %%eax, 16(%3)\n"
23652 " movnti %%edx, 20(%3)\n"
23653 - "10: movl 24(%4), %%eax\n"
23654 - "51: movl 28(%4), %%edx\n"
23655 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23656 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23657 " movnti %%eax, 24(%3)\n"
23658 " movnti %%edx, 28(%3)\n"
23659 - "11: movl 32(%4), %%eax\n"
23660 - "61: movl 36(%4), %%edx\n"
23661 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23662 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23663 " movnti %%eax, 32(%3)\n"
23664 " movnti %%edx, 36(%3)\n"
23665 - "12: movl 40(%4), %%eax\n"
23666 - "71: movl 44(%4), %%edx\n"
23667 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23668 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23669 " movnti %%eax, 40(%3)\n"
23670 " movnti %%edx, 44(%3)\n"
23671 - "13: movl 48(%4), %%eax\n"
23672 - "81: movl 52(%4), %%edx\n"
23673 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23674 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23675 " movnti %%eax, 48(%3)\n"
23676 " movnti %%edx, 52(%3)\n"
23677 - "14: movl 56(%4), %%eax\n"
23678 - "91: movl 60(%4), %%edx\n"
23679 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23680 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23681 " movnti %%eax, 56(%3)\n"
23682 " movnti %%edx, 60(%3)\n"
23683 " addl $-64, %0\n"
23684 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23685 " shrl $2, %0\n"
23686 " andl $3, %%eax\n"
23687 " cld\n"
23688 - "6: rep; movsl\n"
23689 + "6: rep; "__copyuser_seg" movsl\n"
23690 " movl %%eax,%0\n"
23691 - "7: rep; movsb\n"
23692 + "7: rep; "__copyuser_seg" movsb\n"
23693 "8:\n"
23694 ".section .fixup,\"ax\"\n"
23695 "9: lea 0(%%eax,%0,4),%0\n"
23696 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23697
23698 __asm__ __volatile__(
23699 " .align 2,0x90\n"
23700 - "0: movl 32(%4), %%eax\n"
23701 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23702 " cmpl $67, %0\n"
23703 " jbe 2f\n"
23704 - "1: movl 64(%4), %%eax\n"
23705 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23706 " .align 2,0x90\n"
23707 - "2: movl 0(%4), %%eax\n"
23708 - "21: movl 4(%4), %%edx\n"
23709 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23710 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23711 " movnti %%eax, 0(%3)\n"
23712 " movnti %%edx, 4(%3)\n"
23713 - "3: movl 8(%4), %%eax\n"
23714 - "31: movl 12(%4),%%edx\n"
23715 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23716 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23717 " movnti %%eax, 8(%3)\n"
23718 " movnti %%edx, 12(%3)\n"
23719 - "4: movl 16(%4), %%eax\n"
23720 - "41: movl 20(%4), %%edx\n"
23721 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23722 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23723 " movnti %%eax, 16(%3)\n"
23724 " movnti %%edx, 20(%3)\n"
23725 - "10: movl 24(%4), %%eax\n"
23726 - "51: movl 28(%4), %%edx\n"
23727 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23728 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23729 " movnti %%eax, 24(%3)\n"
23730 " movnti %%edx, 28(%3)\n"
23731 - "11: movl 32(%4), %%eax\n"
23732 - "61: movl 36(%4), %%edx\n"
23733 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23734 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23735 " movnti %%eax, 32(%3)\n"
23736 " movnti %%edx, 36(%3)\n"
23737 - "12: movl 40(%4), %%eax\n"
23738 - "71: movl 44(%4), %%edx\n"
23739 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23740 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23741 " movnti %%eax, 40(%3)\n"
23742 " movnti %%edx, 44(%3)\n"
23743 - "13: movl 48(%4), %%eax\n"
23744 - "81: movl 52(%4), %%edx\n"
23745 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23746 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23747 " movnti %%eax, 48(%3)\n"
23748 " movnti %%edx, 52(%3)\n"
23749 - "14: movl 56(%4), %%eax\n"
23750 - "91: movl 60(%4), %%edx\n"
23751 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23752 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23753 " movnti %%eax, 56(%3)\n"
23754 " movnti %%edx, 60(%3)\n"
23755 " addl $-64, %0\n"
23756 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23757 " shrl $2, %0\n"
23758 " andl $3, %%eax\n"
23759 " cld\n"
23760 - "6: rep; movsl\n"
23761 + "6: rep; "__copyuser_seg" movsl\n"
23762 " movl %%eax,%0\n"
23763 - "7: rep; movsb\n"
23764 + "7: rep; "__copyuser_seg" movsb\n"
23765 "8:\n"
23766 ".section .fixup,\"ax\"\n"
23767 "9: lea 0(%%eax,%0,4),%0\n"
23768 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23769 */
23770 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23771 unsigned long size);
23772 -unsigned long __copy_user_intel(void __user *to, const void *from,
23773 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23774 + unsigned long size);
23775 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23776 unsigned long size);
23777 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23778 const void __user *from, unsigned long size);
23779 #endif /* CONFIG_X86_INTEL_USERCOPY */
23780
23781 /* Generic arbitrary sized copy. */
23782 -#define __copy_user(to, from, size) \
23783 +#define __copy_user(to, from, size, prefix, set, restore) \
23784 do { \
23785 int __d0, __d1, __d2; \
23786 __asm__ __volatile__( \
23787 + set \
23788 " cmp $7,%0\n" \
23789 " jbe 1f\n" \
23790 " movl %1,%0\n" \
23791 " negl %0\n" \
23792 " andl $7,%0\n" \
23793 " subl %0,%3\n" \
23794 - "4: rep; movsb\n" \
23795 + "4: rep; "prefix"movsb\n" \
23796 " movl %3,%0\n" \
23797 " shrl $2,%0\n" \
23798 " andl $3,%3\n" \
23799 " .align 2,0x90\n" \
23800 - "0: rep; movsl\n" \
23801 + "0: rep; "prefix"movsl\n" \
23802 " movl %3,%0\n" \
23803 - "1: rep; movsb\n" \
23804 + "1: rep; "prefix"movsb\n" \
23805 "2:\n" \
23806 + restore \
23807 ".section .fixup,\"ax\"\n" \
23808 "5: addl %3,%0\n" \
23809 " jmp 2b\n" \
23810 @@ -682,14 +799,14 @@ do { \
23811 " negl %0\n" \
23812 " andl $7,%0\n" \
23813 " subl %0,%3\n" \
23814 - "4: rep; movsb\n" \
23815 + "4: rep; "__copyuser_seg"movsb\n" \
23816 " movl %3,%0\n" \
23817 " shrl $2,%0\n" \
23818 " andl $3,%3\n" \
23819 " .align 2,0x90\n" \
23820 - "0: rep; movsl\n" \
23821 + "0: rep; "__copyuser_seg"movsl\n" \
23822 " movl %3,%0\n" \
23823 - "1: rep; movsb\n" \
23824 + "1: rep; "__copyuser_seg"movsb\n" \
23825 "2:\n" \
23826 ".section .fixup,\"ax\"\n" \
23827 "5: addl %3,%0\n" \
23828 @@ -775,9 +892,9 @@ survive:
23829 }
23830 #endif
23831 if (movsl_is_ok(to, from, n))
23832 - __copy_user(to, from, n);
23833 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23834 else
23835 - n = __copy_user_intel(to, from, n);
23836 + n = __generic_copy_to_user_intel(to, from, n);
23837 return n;
23838 }
23839 EXPORT_SYMBOL(__copy_to_user_ll);
23840 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23841 unsigned long n)
23842 {
23843 if (movsl_is_ok(to, from, n))
23844 - __copy_user(to, from, n);
23845 + __copy_user(to, from, n, __copyuser_seg, "", "");
23846 else
23847 - n = __copy_user_intel((void __user *)to,
23848 - (const void *)from, n);
23849 + n = __generic_copy_from_user_intel(to, from, n);
23850 return n;
23851 }
23852 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23853 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23854 if (n > 64 && cpu_has_xmm2)
23855 n = __copy_user_intel_nocache(to, from, n);
23856 else
23857 - __copy_user(to, from, n);
23858 + __copy_user(to, from, n, __copyuser_seg, "", "");
23859 #else
23860 - __copy_user(to, from, n);
23861 + __copy_user(to, from, n, __copyuser_seg, "", "");
23862 #endif
23863 return n;
23864 }
23865 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23866
23867 -/**
23868 - * copy_to_user: - Copy a block of data into user space.
23869 - * @to: Destination address, in user space.
23870 - * @from: Source address, in kernel space.
23871 - * @n: Number of bytes to copy.
23872 - *
23873 - * Context: User context only. This function may sleep.
23874 - *
23875 - * Copy data from kernel space to user space.
23876 - *
23877 - * Returns number of bytes that could not be copied.
23878 - * On success, this will be zero.
23879 - */
23880 -unsigned long
23881 -copy_to_user(void __user *to, const void *from, unsigned long n)
23882 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23883 +void __set_fs(mm_segment_t x)
23884 {
23885 - if (access_ok(VERIFY_WRITE, to, n))
23886 - n = __copy_to_user(to, from, n);
23887 - return n;
23888 + switch (x.seg) {
23889 + case 0:
23890 + loadsegment(gs, 0);
23891 + break;
23892 + case TASK_SIZE_MAX:
23893 + loadsegment(gs, __USER_DS);
23894 + break;
23895 + case -1UL:
23896 + loadsegment(gs, __KERNEL_DS);
23897 + break;
23898 + default:
23899 + BUG();
23900 + }
23901 + return;
23902 }
23903 -EXPORT_SYMBOL(copy_to_user);
23904 +EXPORT_SYMBOL(__set_fs);
23905
23906 -/**
23907 - * copy_from_user: - Copy a block of data from user space.
23908 - * @to: Destination address, in kernel space.
23909 - * @from: Source address, in user space.
23910 - * @n: Number of bytes to copy.
23911 - *
23912 - * Context: User context only. This function may sleep.
23913 - *
23914 - * Copy data from user space to kernel space.
23915 - *
23916 - * Returns number of bytes that could not be copied.
23917 - * On success, this will be zero.
23918 - *
23919 - * If some data could not be copied, this function will pad the copied
23920 - * data to the requested size using zero bytes.
23921 - */
23922 -unsigned long
23923 -copy_from_user(void *to, const void __user *from, unsigned long n)
23924 +void set_fs(mm_segment_t x)
23925 {
23926 - if (access_ok(VERIFY_READ, from, n))
23927 - n = __copy_from_user(to, from, n);
23928 - else
23929 - memset(to, 0, n);
23930 - return n;
23931 + current_thread_info()->addr_limit = x;
23932 + __set_fs(x);
23933 }
23934 -EXPORT_SYMBOL(copy_from_user);
23935 +EXPORT_SYMBOL(set_fs);
23936 +#endif
23937 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23938 index b7c2849..8633ad8 100644
23939 --- a/arch/x86/lib/usercopy_64.c
23940 +++ b/arch/x86/lib/usercopy_64.c
23941 @@ -42,6 +42,12 @@ long
23942 __strncpy_from_user(char *dst, const char __user *src, long count)
23943 {
23944 long res;
23945 +
23946 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23947 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23948 + src += PAX_USER_SHADOW_BASE;
23949 +#endif
23950 +
23951 __do_strncpy_from_user(dst, src, count, res);
23952 return res;
23953 }
23954 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23955 {
23956 long __d0;
23957 might_fault();
23958 +
23959 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23960 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23961 + addr += PAX_USER_SHADOW_BASE;
23962 +#endif
23963 +
23964 /* no memory constraint because it doesn't change any memory gcc knows
23965 about */
23966 asm volatile(
23967 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23968 }
23969 EXPORT_SYMBOL(strlen_user);
23970
23971 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23972 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23973 {
23974 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23975 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23976 - }
23977 - return len;
23978 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23979 +
23980 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23981 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23982 + to += PAX_USER_SHADOW_BASE;
23983 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23984 + from += PAX_USER_SHADOW_BASE;
23985 +#endif
23986 +
23987 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23988 + }
23989 + return len;
23990 }
23991 EXPORT_SYMBOL(copy_in_user);
23992
23993 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23994 * it is not necessary to optimize tail handling.
23995 */
23996 unsigned long
23997 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23998 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23999 {
24000 char c;
24001 unsigned zero_len;
24002 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
24003 index 61b41ca..5fef66a 100644
24004 --- a/arch/x86/mm/extable.c
24005 +++ b/arch/x86/mm/extable.c
24006 @@ -1,14 +1,71 @@
24007 #include <linux/module.h>
24008 #include <linux/spinlock.h>
24009 +#include <linux/sort.h>
24010 #include <asm/uaccess.h>
24011 +#include <asm/pgtable.h>
24012
24013 +/*
24014 + * The exception table needs to be sorted so that the binary
24015 + * search that we use to find entries in it works properly.
24016 + * This is used both for the kernel exception table and for
24017 + * the exception tables of modules that get loaded.
24018 + */
24019 +static int cmp_ex(const void *a, const void *b)
24020 +{
24021 + const struct exception_table_entry *x = a, *y = b;
24022 +
24023 + /* avoid overflow */
24024 + if (x->insn > y->insn)
24025 + return 1;
24026 + if (x->insn < y->insn)
24027 + return -1;
24028 + return 0;
24029 +}
24030 +
24031 +static void swap_ex(void *a, void *b, int size)
24032 +{
24033 + struct exception_table_entry t, *x = a, *y = b;
24034 +
24035 + t = *x;
24036 +
24037 + pax_open_kernel();
24038 + *x = *y;
24039 + *y = t;
24040 + pax_close_kernel();
24041 +}
24042 +
24043 +void sort_extable(struct exception_table_entry *start,
24044 + struct exception_table_entry *finish)
24045 +{
24046 + sort(start, finish - start, sizeof(struct exception_table_entry),
24047 + cmp_ex, swap_ex);
24048 +}
24049 +
24050 +#ifdef CONFIG_MODULES
24051 +/*
24052 + * If the exception table is sorted, any referring to the module init
24053 + * will be at the beginning or the end.
24054 + */
24055 +void trim_init_extable(struct module *m)
24056 +{
24057 + /*trim the beginning*/
24058 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24059 + m->extable++;
24060 + m->num_exentries--;
24061 + }
24062 + /*trim the end*/
24063 + while (m->num_exentries &&
24064 + within_module_init(m->extable[m->num_exentries-1].insn, m))
24065 + m->num_exentries--;
24066 +}
24067 +#endif /* CONFIG_MODULES */
24068
24069 int fixup_exception(struct pt_regs *regs)
24070 {
24071 const struct exception_table_entry *fixup;
24072
24073 #ifdef CONFIG_PNPBIOS
24074 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24075 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24076 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24077 extern u32 pnp_bios_is_utter_crap;
24078 pnp_bios_is_utter_crap = 1;
24079 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24080 index 8ac0d76..ca501e2 100644
24081 --- a/arch/x86/mm/fault.c
24082 +++ b/arch/x86/mm/fault.c
24083 @@ -11,10 +11,19 @@
24084 #include <linux/kprobes.h> /* __kprobes, ... */
24085 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24086 #include <linux/perf_event.h> /* perf_sw_event */
24087 +#include <linux/unistd.h>
24088 +#include <linux/compiler.h>
24089
24090 #include <asm/traps.h> /* dotraplinkage, ... */
24091 #include <asm/pgalloc.h> /* pgd_*(), ... */
24092 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24093 +#include <asm/vsyscall.h>
24094 +#include <asm/tlbflush.h>
24095 +
24096 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24097 +#include <asm/stacktrace.h>
24098 +#include "../kernel/dumpstack.h"
24099 +#endif
24100
24101 /*
24102 * Page fault error code bits:
24103 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24104 int ret = 0;
24105
24106 /* kprobe_running() needs smp_processor_id() */
24107 - if (kprobes_built_in() && !user_mode_vm(regs)) {
24108 + if (kprobes_built_in() && !user_mode(regs)) {
24109 preempt_disable();
24110 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24111 ret = 1;
24112 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24113 return !instr_lo || (instr_lo>>1) == 1;
24114 case 0x00:
24115 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24116 - if (probe_kernel_address(instr, opcode))
24117 + if (user_mode(regs)) {
24118 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24119 + return 0;
24120 + } else if (probe_kernel_address(instr, opcode))
24121 return 0;
24122
24123 *prefetch = (instr_lo == 0xF) &&
24124 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24125 while (instr < max_instr) {
24126 unsigned char opcode;
24127
24128 - if (probe_kernel_address(instr, opcode))
24129 + if (user_mode(regs)) {
24130 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24131 + break;
24132 + } else if (probe_kernel_address(instr, opcode))
24133 break;
24134
24135 instr++;
24136 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24137 force_sig_info(si_signo, &info, tsk);
24138 }
24139
24140 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24141 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24142 +#endif
24143 +
24144 +#ifdef CONFIG_PAX_EMUTRAMP
24145 +static int pax_handle_fetch_fault(struct pt_regs *regs);
24146 +#endif
24147 +
24148 +#ifdef CONFIG_PAX_PAGEEXEC
24149 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24150 +{
24151 + pgd_t *pgd;
24152 + pud_t *pud;
24153 + pmd_t *pmd;
24154 +
24155 + pgd = pgd_offset(mm, address);
24156 + if (!pgd_present(*pgd))
24157 + return NULL;
24158 + pud = pud_offset(pgd, address);
24159 + if (!pud_present(*pud))
24160 + return NULL;
24161 + pmd = pmd_offset(pud, address);
24162 + if (!pmd_present(*pmd))
24163 + return NULL;
24164 + return pmd;
24165 +}
24166 +#endif
24167 +
24168 DEFINE_SPINLOCK(pgd_lock);
24169 LIST_HEAD(pgd_list);
24170
24171 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24172 address += PMD_SIZE) {
24173
24174 unsigned long flags;
24175 +
24176 +#ifdef CONFIG_PAX_PER_CPU_PGD
24177 + unsigned long cpu;
24178 +#else
24179 struct page *page;
24180 +#endif
24181
24182 spin_lock_irqsave(&pgd_lock, flags);
24183 +
24184 +#ifdef CONFIG_PAX_PER_CPU_PGD
24185 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24186 + pgd_t *pgd = get_cpu_pgd(cpu);
24187 +#else
24188 list_for_each_entry(page, &pgd_list, lru) {
24189 - if (!vmalloc_sync_one(page_address(page), address))
24190 + pgd_t *pgd = page_address(page);
24191 +#endif
24192 +
24193 + if (!vmalloc_sync_one(pgd, address))
24194 break;
24195 }
24196 spin_unlock_irqrestore(&pgd_lock, flags);
24197 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24198 * an interrupt in the middle of a task switch..
24199 */
24200 pgd_paddr = read_cr3();
24201 +
24202 +#ifdef CONFIG_PAX_PER_CPU_PGD
24203 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24204 +#endif
24205 +
24206 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24207 if (!pmd_k)
24208 return -1;
24209 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24210
24211 const pgd_t *pgd_ref = pgd_offset_k(address);
24212 unsigned long flags;
24213 +
24214 +#ifdef CONFIG_PAX_PER_CPU_PGD
24215 + unsigned long cpu;
24216 +#else
24217 struct page *page;
24218 +#endif
24219
24220 if (pgd_none(*pgd_ref))
24221 continue;
24222
24223 spin_lock_irqsave(&pgd_lock, flags);
24224 +
24225 +#ifdef CONFIG_PAX_PER_CPU_PGD
24226 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24227 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24228 +#else
24229 list_for_each_entry(page, &pgd_list, lru) {
24230 pgd_t *pgd;
24231 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24232 +#endif
24233 +
24234 if (pgd_none(*pgd))
24235 set_pgd(pgd, *pgd_ref);
24236 else
24237 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24238 * happen within a race in page table update. In the later
24239 * case just flush:
24240 */
24241 +
24242 +#ifdef CONFIG_PAX_PER_CPU_PGD
24243 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24244 + pgd = pgd_offset_cpu(smp_processor_id(), address);
24245 +#else
24246 pgd = pgd_offset(current->active_mm, address);
24247 +#endif
24248 +
24249 pgd_ref = pgd_offset_k(address);
24250 if (pgd_none(*pgd_ref))
24251 return -1;
24252 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24253 static int is_errata100(struct pt_regs *regs, unsigned long address)
24254 {
24255 #ifdef CONFIG_X86_64
24256 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24257 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24258 return 1;
24259 #endif
24260 return 0;
24261 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24262 }
24263
24264 static const char nx_warning[] = KERN_CRIT
24265 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24266 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24267
24268 static void
24269 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24270 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24271 if (!oops_may_print())
24272 return;
24273
24274 - if (error_code & PF_INSTR) {
24275 + if (nx_enabled && (error_code & PF_INSTR)) {
24276 unsigned int level;
24277
24278 pte_t *pte = lookup_address(address, &level);
24279
24280 if (pte && pte_present(*pte) && !pte_exec(*pte))
24281 - printk(nx_warning, current_uid());
24282 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24283 }
24284
24285 +#ifdef CONFIG_PAX_KERNEXEC
24286 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24287 + if (current->signal->curr_ip)
24288 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24289 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24290 + else
24291 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24292 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24293 + }
24294 +#endif
24295 +
24296 printk(KERN_ALERT "BUG: unable to handle kernel ");
24297 if (address < PAGE_SIZE)
24298 printk(KERN_CONT "NULL pointer dereference");
24299 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24300 {
24301 struct task_struct *tsk = current;
24302
24303 +#ifdef CONFIG_X86_64
24304 + struct mm_struct *mm = tsk->mm;
24305 +
24306 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24307 + if (regs->ip == (unsigned long)vgettimeofday) {
24308 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24309 + return;
24310 + } else if (regs->ip == (unsigned long)vtime) {
24311 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24312 + return;
24313 + } else if (regs->ip == (unsigned long)vgetcpu) {
24314 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24315 + return;
24316 + }
24317 + }
24318 +#endif
24319 +
24320 /* User mode accesses just cause a SIGSEGV */
24321 if (error_code & PF_USER) {
24322 /*
24323 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24324 if (is_errata100(regs, address))
24325 return;
24326
24327 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24328 + if (pax_is_fetch_fault(regs, error_code, address)) {
24329 +
24330 +#ifdef CONFIG_PAX_EMUTRAMP
24331 + switch (pax_handle_fetch_fault(regs)) {
24332 + case 2:
24333 + return;
24334 + }
24335 +#endif
24336 +
24337 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24338 + do_group_exit(SIGKILL);
24339 + }
24340 +#endif
24341 +
24342 if (unlikely(show_unhandled_signals))
24343 show_signal_msg(regs, error_code, address, tsk);
24344
24345 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24346 if (fault & VM_FAULT_HWPOISON) {
24347 printk(KERN_ERR
24348 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24349 - tsk->comm, tsk->pid, address);
24350 + tsk->comm, task_pid_nr(tsk), address);
24351 code = BUS_MCEERR_AR;
24352 }
24353 #endif
24354 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24355 return 1;
24356 }
24357
24358 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24359 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24360 +{
24361 + pte_t *pte;
24362 + pmd_t *pmd;
24363 + spinlock_t *ptl;
24364 + unsigned char pte_mask;
24365 +
24366 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24367 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24368 + return 0;
24369 +
24370 + /* PaX: it's our fault, let's handle it if we can */
24371 +
24372 + /* PaX: take a look at read faults before acquiring any locks */
24373 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24374 + /* instruction fetch attempt from a protected page in user mode */
24375 + up_read(&mm->mmap_sem);
24376 +
24377 +#ifdef CONFIG_PAX_EMUTRAMP
24378 + switch (pax_handle_fetch_fault(regs)) {
24379 + case 2:
24380 + return 1;
24381 + }
24382 +#endif
24383 +
24384 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24385 + do_group_exit(SIGKILL);
24386 + }
24387 +
24388 + pmd = pax_get_pmd(mm, address);
24389 + if (unlikely(!pmd))
24390 + return 0;
24391 +
24392 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24393 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24394 + pte_unmap_unlock(pte, ptl);
24395 + return 0;
24396 + }
24397 +
24398 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24399 + /* write attempt to a protected page in user mode */
24400 + pte_unmap_unlock(pte, ptl);
24401 + return 0;
24402 + }
24403 +
24404 +#ifdef CONFIG_SMP
24405 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24406 +#else
24407 + if (likely(address > get_limit(regs->cs)))
24408 +#endif
24409 + {
24410 + set_pte(pte, pte_mkread(*pte));
24411 + __flush_tlb_one(address);
24412 + pte_unmap_unlock(pte, ptl);
24413 + up_read(&mm->mmap_sem);
24414 + return 1;
24415 + }
24416 +
24417 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24418 +
24419 + /*
24420 + * PaX: fill DTLB with user rights and retry
24421 + */
24422 + __asm__ __volatile__ (
24423 + "orb %2,(%1)\n"
24424 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24425 +/*
24426 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24427 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24428 + * page fault when examined during a TLB load attempt. this is true not only
24429 + * for PTEs holding a non-present entry but also present entries that will
24430 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24431 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24432 + * for our target pages since their PTEs are simply not in the TLBs at all.
24433 +
24434 + * the best thing in omitting it is that we gain around 15-20% speed in the
24435 + * fast path of the page fault handler and can get rid of tracing since we
24436 + * can no longer flush unintended entries.
24437 + */
24438 + "invlpg (%0)\n"
24439 +#endif
24440 + __copyuser_seg"testb $0,(%0)\n"
24441 + "xorb %3,(%1)\n"
24442 + :
24443 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24444 + : "memory", "cc");
24445 + pte_unmap_unlock(pte, ptl);
24446 + up_read(&mm->mmap_sem);
24447 + return 1;
24448 +}
24449 +#endif
24450 +
24451 /*
24452 * Handle a spurious fault caused by a stale TLB entry.
24453 *
24454 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24455 static inline int
24456 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24457 {
24458 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24459 + return 1;
24460 +
24461 if (write) {
24462 /* write, present and write, not present: */
24463 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24464 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24465 {
24466 struct vm_area_struct *vma;
24467 struct task_struct *tsk;
24468 - unsigned long address;
24469 struct mm_struct *mm;
24470 int write;
24471 int fault;
24472
24473 - tsk = current;
24474 - mm = tsk->mm;
24475 -
24476 /* Get the faulting address: */
24477 - address = read_cr2();
24478 + unsigned long address = read_cr2();
24479 +
24480 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24481 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24482 + if (!search_exception_tables(regs->ip)) {
24483 + bad_area_nosemaphore(regs, error_code, address);
24484 + return;
24485 + }
24486 + if (address < PAX_USER_SHADOW_BASE) {
24487 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24488 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24489 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24490 + } else
24491 + address -= PAX_USER_SHADOW_BASE;
24492 + }
24493 +#endif
24494 +
24495 + tsk = current;
24496 + mm = tsk->mm;
24497
24498 /*
24499 * Detect and handle instructions that would cause a page fault for
24500 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24501 * User-mode registers count as a user access even for any
24502 * potential system fault or CPU buglet:
24503 */
24504 - if (user_mode_vm(regs)) {
24505 + if (user_mode(regs)) {
24506 local_irq_enable();
24507 error_code |= PF_USER;
24508 } else {
24509 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24510 might_sleep();
24511 }
24512
24513 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24514 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24515 + return;
24516 +#endif
24517 +
24518 vma = find_vma(mm, address);
24519 if (unlikely(!vma)) {
24520 bad_area(regs, error_code, address);
24521 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24522 bad_area(regs, error_code, address);
24523 return;
24524 }
24525 - if (error_code & PF_USER) {
24526 - /*
24527 - * Accessing the stack below %sp is always a bug.
24528 - * The large cushion allows instructions like enter
24529 - * and pusha to work. ("enter $65535, $31" pushes
24530 - * 32 pointers and then decrements %sp by 65535.)
24531 - */
24532 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24533 - bad_area(regs, error_code, address);
24534 - return;
24535 - }
24536 + /*
24537 + * Accessing the stack below %sp is always a bug.
24538 + * The large cushion allows instructions like enter
24539 + * and pusha to work. ("enter $65535, $31" pushes
24540 + * 32 pointers and then decrements %sp by 65535.)
24541 + */
24542 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24543 + bad_area(regs, error_code, address);
24544 + return;
24545 }
24546 +
24547 +#ifdef CONFIG_PAX_SEGMEXEC
24548 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24549 + bad_area(regs, error_code, address);
24550 + return;
24551 + }
24552 +#endif
24553 +
24554 if (unlikely(expand_stack(vma, address))) {
24555 bad_area(regs, error_code, address);
24556 return;
24557 @@ -1146,3 +1390,292 @@ good_area:
24558
24559 up_read(&mm->mmap_sem);
24560 }
24561 +
24562 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24563 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24564 +{
24565 + struct mm_struct *mm = current->mm;
24566 + unsigned long ip = regs->ip;
24567 +
24568 + if (v8086_mode(regs))
24569 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24570 +
24571 +#ifdef CONFIG_PAX_PAGEEXEC
24572 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24573 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24574 + return true;
24575 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24576 + return true;
24577 + return false;
24578 + }
24579 +#endif
24580 +
24581 +#ifdef CONFIG_PAX_SEGMEXEC
24582 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24583 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24584 + return true;
24585 + return false;
24586 + }
24587 +#endif
24588 +
24589 + return false;
24590 +}
24591 +#endif
24592 +
24593 +#ifdef CONFIG_PAX_EMUTRAMP
24594 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24595 +{
24596 + int err;
24597 +
24598 + do { /* PaX: libffi trampoline emulation */
24599 + unsigned char mov, jmp;
24600 + unsigned int addr1, addr2;
24601 +
24602 +#ifdef CONFIG_X86_64
24603 + if ((regs->ip + 9) >> 32)
24604 + break;
24605 +#endif
24606 +
24607 + err = get_user(mov, (unsigned char __user *)regs->ip);
24608 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24609 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24610 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24611 +
24612 + if (err)
24613 + break;
24614 +
24615 + if (mov == 0xB8 && jmp == 0xE9) {
24616 + regs->ax = addr1;
24617 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24618 + return 2;
24619 + }
24620 + } while (0);
24621 +
24622 + do { /* PaX: gcc trampoline emulation #1 */
24623 + unsigned char mov1, mov2;
24624 + unsigned short jmp;
24625 + unsigned int addr1, addr2;
24626 +
24627 +#ifdef CONFIG_X86_64
24628 + if ((regs->ip + 11) >> 32)
24629 + break;
24630 +#endif
24631 +
24632 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24633 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24634 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24635 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24636 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24637 +
24638 + if (err)
24639 + break;
24640 +
24641 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24642 + regs->cx = addr1;
24643 + regs->ax = addr2;
24644 + regs->ip = addr2;
24645 + return 2;
24646 + }
24647 + } while (0);
24648 +
24649 + do { /* PaX: gcc trampoline emulation #2 */
24650 + unsigned char mov, jmp;
24651 + unsigned int addr1, addr2;
24652 +
24653 +#ifdef CONFIG_X86_64
24654 + if ((regs->ip + 9) >> 32)
24655 + break;
24656 +#endif
24657 +
24658 + err = get_user(mov, (unsigned char __user *)regs->ip);
24659 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24660 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24661 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24662 +
24663 + if (err)
24664 + break;
24665 +
24666 + if (mov == 0xB9 && jmp == 0xE9) {
24667 + regs->cx = addr1;
24668 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24669 + return 2;
24670 + }
24671 + } while (0);
24672 +
24673 + return 1; /* PaX in action */
24674 +}
24675 +
24676 +#ifdef CONFIG_X86_64
24677 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24678 +{
24679 + int err;
24680 +
24681 + do { /* PaX: libffi trampoline emulation */
24682 + unsigned short mov1, mov2, jmp1;
24683 + unsigned char stcclc, jmp2;
24684 + unsigned long addr1, addr2;
24685 +
24686 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24687 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24688 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24689 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24690 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24691 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24692 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24693 +
24694 + if (err)
24695 + break;
24696 +
24697 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24698 + regs->r11 = addr1;
24699 + regs->r10 = addr2;
24700 + if (stcclc == 0xF8)
24701 + regs->flags &= ~X86_EFLAGS_CF;
24702 + else
24703 + regs->flags |= X86_EFLAGS_CF;
24704 + regs->ip = addr1;
24705 + return 2;
24706 + }
24707 + } while (0);
24708 +
24709 + do { /* PaX: gcc trampoline emulation #1 */
24710 + unsigned short mov1, mov2, jmp1;
24711 + unsigned char jmp2;
24712 + unsigned int addr1;
24713 + unsigned long addr2;
24714 +
24715 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24716 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24717 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24718 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24719 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24720 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24721 +
24722 + if (err)
24723 + break;
24724 +
24725 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24726 + regs->r11 = addr1;
24727 + regs->r10 = addr2;
24728 + regs->ip = addr1;
24729 + return 2;
24730 + }
24731 + } while (0);
24732 +
24733 + do { /* PaX: gcc trampoline emulation #2 */
24734 + unsigned short mov1, mov2, jmp1;
24735 + unsigned char jmp2;
24736 + unsigned long addr1, addr2;
24737 +
24738 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24739 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24740 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24741 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24742 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24743 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24744 +
24745 + if (err)
24746 + break;
24747 +
24748 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24749 + regs->r11 = addr1;
24750 + regs->r10 = addr2;
24751 + regs->ip = addr1;
24752 + return 2;
24753 + }
24754 + } while (0);
24755 +
24756 + return 1; /* PaX in action */
24757 +}
24758 +#endif
24759 +
24760 +/*
24761 + * PaX: decide what to do with offenders (regs->ip = fault address)
24762 + *
24763 + * returns 1 when task should be killed
24764 + * 2 when gcc trampoline was detected
24765 + */
24766 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24767 +{
24768 + if (v8086_mode(regs))
24769 + return 1;
24770 +
24771 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24772 + return 1;
24773 +
24774 +#ifdef CONFIG_X86_32
24775 + return pax_handle_fetch_fault_32(regs);
24776 +#else
24777 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24778 + return pax_handle_fetch_fault_32(regs);
24779 + else
24780 + return pax_handle_fetch_fault_64(regs);
24781 +#endif
24782 +}
24783 +#endif
24784 +
24785 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24786 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24787 +{
24788 + long i;
24789 +
24790 + printk(KERN_ERR "PAX: bytes at PC: ");
24791 + for (i = 0; i < 20; i++) {
24792 + unsigned char c;
24793 + if (get_user(c, (unsigned char __force_user *)pc+i))
24794 + printk(KERN_CONT "?? ");
24795 + else
24796 + printk(KERN_CONT "%02x ", c);
24797 + }
24798 + printk("\n");
24799 +
24800 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24801 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24802 + unsigned long c;
24803 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24804 +#ifdef CONFIG_X86_32
24805 + printk(KERN_CONT "???????? ");
24806 +#else
24807 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24808 + printk(KERN_CONT "???????? ???????? ");
24809 + else
24810 + printk(KERN_CONT "???????????????? ");
24811 +#endif
24812 + } else {
24813 +#ifdef CONFIG_X86_64
24814 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24815 + printk(KERN_CONT "%08x ", (unsigned int)c);
24816 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24817 + } else
24818 +#endif
24819 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24820 + }
24821 + }
24822 + printk("\n");
24823 +}
24824 +#endif
24825 +
24826 +/**
24827 + * probe_kernel_write(): safely attempt to write to a location
24828 + * @dst: address to write to
24829 + * @src: pointer to the data that shall be written
24830 + * @size: size of the data chunk
24831 + *
24832 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24833 + * happens, handle that and return -EFAULT.
24834 + */
24835 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24836 +{
24837 + long ret;
24838 + mm_segment_t old_fs = get_fs();
24839 +
24840 + set_fs(KERNEL_DS);
24841 + pagefault_disable();
24842 + pax_open_kernel();
24843 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24844 + pax_close_kernel();
24845 + pagefault_enable();
24846 + set_fs(old_fs);
24847 +
24848 + return ret ? -EFAULT : 0;
24849 +}
24850 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24851 index 71da1bc..7a16bf4 100644
24852 --- a/arch/x86/mm/gup.c
24853 +++ b/arch/x86/mm/gup.c
24854 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24855 addr = start;
24856 len = (unsigned long) nr_pages << PAGE_SHIFT;
24857 end = start + len;
24858 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24859 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24860 (void __user *)start, len)))
24861 return 0;
24862
24863 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24864 index 63a6ba6..79abd7a 100644
24865 --- a/arch/x86/mm/highmem_32.c
24866 +++ b/arch/x86/mm/highmem_32.c
24867 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24868 idx = type + KM_TYPE_NR*smp_processor_id();
24869 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24870 BUG_ON(!pte_none(*(kmap_pte-idx)));
24871 +
24872 + pax_open_kernel();
24873 set_pte(kmap_pte-idx, mk_pte(page, prot));
24874 + pax_close_kernel();
24875
24876 return (void *)vaddr;
24877 }
24878 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24879 index f46c3407..6ff9a26 100644
24880 --- a/arch/x86/mm/hugetlbpage.c
24881 +++ b/arch/x86/mm/hugetlbpage.c
24882 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24883 struct hstate *h = hstate_file(file);
24884 struct mm_struct *mm = current->mm;
24885 struct vm_area_struct *vma;
24886 - unsigned long start_addr;
24887 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24888 +
24889 +#ifdef CONFIG_PAX_SEGMEXEC
24890 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24891 + pax_task_size = SEGMEXEC_TASK_SIZE;
24892 +#endif
24893 +
24894 + pax_task_size -= PAGE_SIZE;
24895
24896 if (len > mm->cached_hole_size) {
24897 - start_addr = mm->free_area_cache;
24898 + start_addr = mm->free_area_cache;
24899 } else {
24900 - start_addr = TASK_UNMAPPED_BASE;
24901 - mm->cached_hole_size = 0;
24902 + start_addr = mm->mmap_base;
24903 + mm->cached_hole_size = 0;
24904 }
24905
24906 full_search:
24907 @@ -281,26 +288,27 @@ full_search:
24908
24909 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24910 /* At this point: (!vma || addr < vma->vm_end). */
24911 - if (TASK_SIZE - len < addr) {
24912 + if (pax_task_size - len < addr) {
24913 /*
24914 * Start a new search - just in case we missed
24915 * some holes.
24916 */
24917 - if (start_addr != TASK_UNMAPPED_BASE) {
24918 - start_addr = TASK_UNMAPPED_BASE;
24919 + if (start_addr != mm->mmap_base) {
24920 + start_addr = mm->mmap_base;
24921 mm->cached_hole_size = 0;
24922 goto full_search;
24923 }
24924 return -ENOMEM;
24925 }
24926 - if (!vma || addr + len <= vma->vm_start) {
24927 - mm->free_area_cache = addr + len;
24928 - return addr;
24929 - }
24930 + if (check_heap_stack_gap(vma, addr, len))
24931 + break;
24932 if (addr + mm->cached_hole_size < vma->vm_start)
24933 mm->cached_hole_size = vma->vm_start - addr;
24934 addr = ALIGN(vma->vm_end, huge_page_size(h));
24935 }
24936 +
24937 + mm->free_area_cache = addr + len;
24938 + return addr;
24939 }
24940
24941 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24942 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24943 {
24944 struct hstate *h = hstate_file(file);
24945 struct mm_struct *mm = current->mm;
24946 - struct vm_area_struct *vma, *prev_vma;
24947 - unsigned long base = mm->mmap_base, addr = addr0;
24948 + struct vm_area_struct *vma;
24949 + unsigned long base = mm->mmap_base, addr;
24950 unsigned long largest_hole = mm->cached_hole_size;
24951 - int first_time = 1;
24952
24953 /* don't allow allocations above current base */
24954 if (mm->free_area_cache > base)
24955 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24956 largest_hole = 0;
24957 mm->free_area_cache = base;
24958 }
24959 -try_again:
24960 +
24961 /* make sure it can fit in the remaining address space */
24962 if (mm->free_area_cache < len)
24963 goto fail;
24964
24965 /* either no address requested or cant fit in requested address hole */
24966 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24967 + addr = (mm->free_area_cache - len);
24968 do {
24969 + addr &= huge_page_mask(h);
24970 + vma = find_vma(mm, addr);
24971 /*
24972 * Lookup failure means no vma is above this address,
24973 * i.e. return with success:
24974 - */
24975 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24976 - return addr;
24977 -
24978 - /*
24979 * new region fits between prev_vma->vm_end and
24980 * vma->vm_start, use it:
24981 */
24982 - if (addr + len <= vma->vm_start &&
24983 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24984 + if (check_heap_stack_gap(vma, addr, len)) {
24985 /* remember the address as a hint for next time */
24986 - mm->cached_hole_size = largest_hole;
24987 - return (mm->free_area_cache = addr);
24988 - } else {
24989 - /* pull free_area_cache down to the first hole */
24990 - if (mm->free_area_cache == vma->vm_end) {
24991 - mm->free_area_cache = vma->vm_start;
24992 - mm->cached_hole_size = largest_hole;
24993 - }
24994 + mm->cached_hole_size = largest_hole;
24995 + return (mm->free_area_cache = addr);
24996 + }
24997 + /* pull free_area_cache down to the first hole */
24998 + if (mm->free_area_cache == vma->vm_end) {
24999 + mm->free_area_cache = vma->vm_start;
25000 + mm->cached_hole_size = largest_hole;
25001 }
25002
25003 /* remember the largest hole we saw so far */
25004 if (addr + largest_hole < vma->vm_start)
25005 - largest_hole = vma->vm_start - addr;
25006 + largest_hole = vma->vm_start - addr;
25007
25008 /* try just below the current vma->vm_start */
25009 - addr = (vma->vm_start - len) & huge_page_mask(h);
25010 - } while (len <= vma->vm_start);
25011 + addr = skip_heap_stack_gap(vma, len);
25012 + } while (!IS_ERR_VALUE(addr));
25013
25014 fail:
25015 /*
25016 - * if hint left us with no space for the requested
25017 - * mapping then try again:
25018 - */
25019 - if (first_time) {
25020 - mm->free_area_cache = base;
25021 - largest_hole = 0;
25022 - first_time = 0;
25023 - goto try_again;
25024 - }
25025 - /*
25026 * A failed mmap() very likely causes application failure,
25027 * so fall back to the bottom-up function here. This scenario
25028 * can happen with large stack limits and large mmap()
25029 * allocations.
25030 */
25031 - mm->free_area_cache = TASK_UNMAPPED_BASE;
25032 +
25033 +#ifdef CONFIG_PAX_SEGMEXEC
25034 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25035 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25036 + else
25037 +#endif
25038 +
25039 + mm->mmap_base = TASK_UNMAPPED_BASE;
25040 +
25041 +#ifdef CONFIG_PAX_RANDMMAP
25042 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25043 + mm->mmap_base += mm->delta_mmap;
25044 +#endif
25045 +
25046 + mm->free_area_cache = mm->mmap_base;
25047 mm->cached_hole_size = ~0UL;
25048 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25049 len, pgoff, flags);
25050 @@ -387,6 +393,7 @@ fail:
25051 /*
25052 * Restore the topdown base:
25053 */
25054 + mm->mmap_base = base;
25055 mm->free_area_cache = base;
25056 mm->cached_hole_size = ~0UL;
25057
25058 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25059 struct hstate *h = hstate_file(file);
25060 struct mm_struct *mm = current->mm;
25061 struct vm_area_struct *vma;
25062 + unsigned long pax_task_size = TASK_SIZE;
25063
25064 if (len & ~huge_page_mask(h))
25065 return -EINVAL;
25066 - if (len > TASK_SIZE)
25067 +
25068 +#ifdef CONFIG_PAX_SEGMEXEC
25069 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25070 + pax_task_size = SEGMEXEC_TASK_SIZE;
25071 +#endif
25072 +
25073 + pax_task_size -= PAGE_SIZE;
25074 +
25075 + if (len > pax_task_size)
25076 return -ENOMEM;
25077
25078 if (flags & MAP_FIXED) {
25079 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25080 if (addr) {
25081 addr = ALIGN(addr, huge_page_size(h));
25082 vma = find_vma(mm, addr);
25083 - if (TASK_SIZE - len >= addr &&
25084 - (!vma || addr + len <= vma->vm_start))
25085 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25086 return addr;
25087 }
25088 if (mm->get_unmapped_area == arch_get_unmapped_area)
25089 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25090 index 73ffd55..f61c2a7 100644
25091 --- a/arch/x86/mm/init.c
25092 +++ b/arch/x86/mm/init.c
25093 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25094 * cause a hotspot and fill up ZONE_DMA. The page tables
25095 * need roughly 0.5KB per GB.
25096 */
25097 -#ifdef CONFIG_X86_32
25098 - start = 0x7000;
25099 -#else
25100 - start = 0x8000;
25101 -#endif
25102 + start = 0x100000;
25103 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25104 tables, PAGE_SIZE);
25105 if (e820_table_start == -1UL)
25106 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25107 #endif
25108
25109 set_nx();
25110 - if (nx_enabled)
25111 + if (nx_enabled && cpu_has_nx)
25112 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25113
25114 /* Enable PSE if available */
25115 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25116 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25117 * mmio resources as well as potential bios/acpi data regions.
25118 */
25119 +
25120 int devmem_is_allowed(unsigned long pagenr)
25121 {
25122 +#ifdef CONFIG_GRKERNSEC_KMEM
25123 + /* allow BDA */
25124 + if (!pagenr)
25125 + return 1;
25126 + /* allow EBDA */
25127 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25128 + return 1;
25129 + /* allow ISA/video mem */
25130 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25131 + return 1;
25132 + /* throw out everything else below 1MB */
25133 + if (pagenr <= 256)
25134 + return 0;
25135 +#else
25136 if (pagenr <= 256)
25137 return 1;
25138 +#endif
25139 +
25140 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25141 return 0;
25142 if (!page_is_ram(pagenr))
25143 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25144
25145 void free_initmem(void)
25146 {
25147 +
25148 +#ifdef CONFIG_PAX_KERNEXEC
25149 +#ifdef CONFIG_X86_32
25150 + /* PaX: limit KERNEL_CS to actual size */
25151 + unsigned long addr, limit;
25152 + struct desc_struct d;
25153 + int cpu;
25154 +
25155 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25156 + limit = (limit - 1UL) >> PAGE_SHIFT;
25157 +
25158 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25159 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25160 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25161 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25162 + }
25163 +
25164 + /* PaX: make KERNEL_CS read-only */
25165 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25166 + if (!paravirt_enabled())
25167 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25168 +/*
25169 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25170 + pgd = pgd_offset_k(addr);
25171 + pud = pud_offset(pgd, addr);
25172 + pmd = pmd_offset(pud, addr);
25173 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25174 + }
25175 +*/
25176 +#ifdef CONFIG_X86_PAE
25177 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25178 +/*
25179 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25180 + pgd = pgd_offset_k(addr);
25181 + pud = pud_offset(pgd, addr);
25182 + pmd = pmd_offset(pud, addr);
25183 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25184 + }
25185 +*/
25186 +#endif
25187 +
25188 +#ifdef CONFIG_MODULES
25189 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25190 +#endif
25191 +
25192 +#else
25193 + pgd_t *pgd;
25194 + pud_t *pud;
25195 + pmd_t *pmd;
25196 + unsigned long addr, end;
25197 +
25198 + /* PaX: make kernel code/rodata read-only, rest non-executable */
25199 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25200 + pgd = pgd_offset_k(addr);
25201 + pud = pud_offset(pgd, addr);
25202 + pmd = pmd_offset(pud, addr);
25203 + if (!pmd_present(*pmd))
25204 + continue;
25205 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25206 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25207 + else
25208 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25209 + }
25210 +
25211 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25212 + end = addr + KERNEL_IMAGE_SIZE;
25213 + for (; addr < end; addr += PMD_SIZE) {
25214 + pgd = pgd_offset_k(addr);
25215 + pud = pud_offset(pgd, addr);
25216 + pmd = pmd_offset(pud, addr);
25217 + if (!pmd_present(*pmd))
25218 + continue;
25219 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25220 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25221 + }
25222 +#endif
25223 +
25224 + flush_tlb_all();
25225 +#endif
25226 +
25227 free_init_pages("unused kernel memory",
25228 (unsigned long)(&__init_begin),
25229 (unsigned long)(&__init_end));
25230 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25231 index 30938c1..bda3d5d 100644
25232 --- a/arch/x86/mm/init_32.c
25233 +++ b/arch/x86/mm/init_32.c
25234 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25235 }
25236
25237 /*
25238 - * Creates a middle page table and puts a pointer to it in the
25239 - * given global directory entry. This only returns the gd entry
25240 - * in non-PAE compilation mode, since the middle layer is folded.
25241 - */
25242 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
25243 -{
25244 - pud_t *pud;
25245 - pmd_t *pmd_table;
25246 -
25247 -#ifdef CONFIG_X86_PAE
25248 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25249 - if (after_bootmem)
25250 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25251 - else
25252 - pmd_table = (pmd_t *)alloc_low_page();
25253 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25254 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25255 - pud = pud_offset(pgd, 0);
25256 - BUG_ON(pmd_table != pmd_offset(pud, 0));
25257 -
25258 - return pmd_table;
25259 - }
25260 -#endif
25261 - pud = pud_offset(pgd, 0);
25262 - pmd_table = pmd_offset(pud, 0);
25263 -
25264 - return pmd_table;
25265 -}
25266 -
25267 -/*
25268 * Create a page table and place a pointer to it in a middle page
25269 * directory entry:
25270 */
25271 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25272 page_table = (pte_t *)alloc_low_page();
25273
25274 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25275 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25276 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25277 +#else
25278 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25279 +#endif
25280 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25281 }
25282
25283 return pte_offset_kernel(pmd, 0);
25284 }
25285
25286 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25287 +{
25288 + pud_t *pud;
25289 + pmd_t *pmd_table;
25290 +
25291 + pud = pud_offset(pgd, 0);
25292 + pmd_table = pmd_offset(pud, 0);
25293 +
25294 + return pmd_table;
25295 +}
25296 +
25297 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25298 {
25299 int pgd_idx = pgd_index(vaddr);
25300 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25301 int pgd_idx, pmd_idx;
25302 unsigned long vaddr;
25303 pgd_t *pgd;
25304 + pud_t *pud;
25305 pmd_t *pmd;
25306 pte_t *pte = NULL;
25307
25308 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25309 pgd = pgd_base + pgd_idx;
25310
25311 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25312 - pmd = one_md_table_init(pgd);
25313 - pmd = pmd + pmd_index(vaddr);
25314 + pud = pud_offset(pgd, vaddr);
25315 + pmd = pmd_offset(pud, vaddr);
25316 +
25317 +#ifdef CONFIG_X86_PAE
25318 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25319 +#endif
25320 +
25321 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25322 pmd++, pmd_idx++) {
25323 pte = page_table_kmap_check(one_page_table_init(pmd),
25324 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25325 }
25326 }
25327
25328 -static inline int is_kernel_text(unsigned long addr)
25329 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25330 {
25331 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25332 - return 1;
25333 - return 0;
25334 + if ((start > ktla_ktva((unsigned long)_etext) ||
25335 + end <= ktla_ktva((unsigned long)_stext)) &&
25336 + (start > ktla_ktva((unsigned long)_einittext) ||
25337 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25338 +
25339 +#ifdef CONFIG_ACPI_SLEEP
25340 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25341 +#endif
25342 +
25343 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25344 + return 0;
25345 + return 1;
25346 }
25347
25348 /*
25349 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25350 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25351 unsigned long start_pfn, end_pfn;
25352 pgd_t *pgd_base = swapper_pg_dir;
25353 - int pgd_idx, pmd_idx, pte_ofs;
25354 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25355 unsigned long pfn;
25356 pgd_t *pgd;
25357 + pud_t *pud;
25358 pmd_t *pmd;
25359 pte_t *pte;
25360 unsigned pages_2m, pages_4k;
25361 @@ -278,8 +279,13 @@ repeat:
25362 pfn = start_pfn;
25363 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25364 pgd = pgd_base + pgd_idx;
25365 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25366 - pmd = one_md_table_init(pgd);
25367 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25368 + pud = pud_offset(pgd, 0);
25369 + pmd = pmd_offset(pud, 0);
25370 +
25371 +#ifdef CONFIG_X86_PAE
25372 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25373 +#endif
25374
25375 if (pfn >= end_pfn)
25376 continue;
25377 @@ -291,14 +297,13 @@ repeat:
25378 #endif
25379 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25380 pmd++, pmd_idx++) {
25381 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25382 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25383
25384 /*
25385 * Map with big pages if possible, otherwise
25386 * create normal page tables:
25387 */
25388 if (use_pse) {
25389 - unsigned int addr2;
25390 pgprot_t prot = PAGE_KERNEL_LARGE;
25391 /*
25392 * first pass will use the same initial
25393 @@ -308,11 +313,7 @@ repeat:
25394 __pgprot(PTE_IDENT_ATTR |
25395 _PAGE_PSE);
25396
25397 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25398 - PAGE_OFFSET + PAGE_SIZE-1;
25399 -
25400 - if (is_kernel_text(addr) ||
25401 - is_kernel_text(addr2))
25402 + if (is_kernel_text(address, address + PMD_SIZE))
25403 prot = PAGE_KERNEL_LARGE_EXEC;
25404
25405 pages_2m++;
25406 @@ -329,7 +330,7 @@ repeat:
25407 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25408 pte += pte_ofs;
25409 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25410 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25411 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25412 pgprot_t prot = PAGE_KERNEL;
25413 /*
25414 * first pass will use the same initial
25415 @@ -337,7 +338,7 @@ repeat:
25416 */
25417 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25418
25419 - if (is_kernel_text(addr))
25420 + if (is_kernel_text(address, address + PAGE_SIZE))
25421 prot = PAGE_KERNEL_EXEC;
25422
25423 pages_4k++;
25424 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25425
25426 pud = pud_offset(pgd, va);
25427 pmd = pmd_offset(pud, va);
25428 - if (!pmd_present(*pmd))
25429 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25430 break;
25431
25432 pte = pte_offset_kernel(pmd, va);
25433 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25434
25435 static void __init pagetable_init(void)
25436 {
25437 - pgd_t *pgd_base = swapper_pg_dir;
25438 -
25439 - permanent_kmaps_init(pgd_base);
25440 + permanent_kmaps_init(swapper_pg_dir);
25441 }
25442
25443 #ifdef CONFIG_ACPI_SLEEP
25444 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25445 * ACPI suspend needs this for resume, because things like the intel-agp
25446 * driver might have split up a kernel 4MB mapping.
25447 */
25448 -char swsusp_pg_dir[PAGE_SIZE]
25449 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25450 __attribute__ ((aligned(PAGE_SIZE)));
25451
25452 static inline void save_pg_dir(void)
25453 {
25454 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25455 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25456 }
25457 #else /* !CONFIG_ACPI_SLEEP */
25458 static inline void save_pg_dir(void)
25459 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25460 flush_tlb_all();
25461 }
25462
25463 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25464 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25465 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25466
25467 /* user-defined highmem size */
25468 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25469 * Initialize the boot-time allocator (with low memory only):
25470 */
25471 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25472 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25473 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25474 PAGE_SIZE);
25475 if (bootmap == -1L)
25476 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25477 @@ -864,6 +863,12 @@ void __init mem_init(void)
25478
25479 pci_iommu_alloc();
25480
25481 +#ifdef CONFIG_PAX_PER_CPU_PGD
25482 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25483 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25484 + KERNEL_PGD_PTRS);
25485 +#endif
25486 +
25487 #ifdef CONFIG_FLATMEM
25488 BUG_ON(!mem_map);
25489 #endif
25490 @@ -881,7 +886,7 @@ void __init mem_init(void)
25491 set_highmem_pages_init();
25492
25493 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25494 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25495 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25496 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25497
25498 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25499 @@ -923,10 +928,10 @@ void __init mem_init(void)
25500 ((unsigned long)&__init_end -
25501 (unsigned long)&__init_begin) >> 10,
25502
25503 - (unsigned long)&_etext, (unsigned long)&_edata,
25504 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25505 + (unsigned long)&_sdata, (unsigned long)&_edata,
25506 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25507
25508 - (unsigned long)&_text, (unsigned long)&_etext,
25509 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25510 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25511
25512 /*
25513 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25514 if (!kernel_set_to_readonly)
25515 return;
25516
25517 + start = ktla_ktva(start);
25518 pr_debug("Set kernel text: %lx - %lx for read write\n",
25519 start, start+size);
25520
25521 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25522 if (!kernel_set_to_readonly)
25523 return;
25524
25525 + start = ktla_ktva(start);
25526 pr_debug("Set kernel text: %lx - %lx for read only\n",
25527 start, start+size);
25528
25529 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25530 unsigned long start = PFN_ALIGN(_text);
25531 unsigned long size = PFN_ALIGN(_etext) - start;
25532
25533 + start = ktla_ktva(start);
25534 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25535 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25536 size >> 10);
25537 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25538 index 7d095ad..25d2549 100644
25539 --- a/arch/x86/mm/init_64.c
25540 +++ b/arch/x86/mm/init_64.c
25541 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25542 pmd = fill_pmd(pud, vaddr);
25543 pte = fill_pte(pmd, vaddr);
25544
25545 + pax_open_kernel();
25546 set_pte(pte, new_pte);
25547 + pax_close_kernel();
25548
25549 /*
25550 * It's enough to flush this one mapping.
25551 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25552 pgd = pgd_offset_k((unsigned long)__va(phys));
25553 if (pgd_none(*pgd)) {
25554 pud = (pud_t *) spp_getpage();
25555 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25556 - _PAGE_USER));
25557 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25558 }
25559 pud = pud_offset(pgd, (unsigned long)__va(phys));
25560 if (pud_none(*pud)) {
25561 pmd = (pmd_t *) spp_getpage();
25562 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25563 - _PAGE_USER));
25564 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25565 }
25566 pmd = pmd_offset(pud, phys);
25567 BUG_ON(!pmd_none(*pmd));
25568 @@ -675,6 +675,12 @@ void __init mem_init(void)
25569
25570 pci_iommu_alloc();
25571
25572 +#ifdef CONFIG_PAX_PER_CPU_PGD
25573 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25574 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25575 + KERNEL_PGD_PTRS);
25576 +#endif
25577 +
25578 /* clear_bss() already clear the empty_zero_page */
25579
25580 reservedpages = 0;
25581 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25582 static struct vm_area_struct gate_vma = {
25583 .vm_start = VSYSCALL_START,
25584 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25585 - .vm_page_prot = PAGE_READONLY_EXEC,
25586 - .vm_flags = VM_READ | VM_EXEC
25587 + .vm_page_prot = PAGE_READONLY,
25588 + .vm_flags = VM_READ
25589 };
25590
25591 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25592 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25593
25594 const char *arch_vma_name(struct vm_area_struct *vma)
25595 {
25596 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25597 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25598 return "[vdso]";
25599 if (vma == &gate_vma)
25600 return "[vsyscall]";
25601 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25602 index 84e236c..69bd3f6 100644
25603 --- a/arch/x86/mm/iomap_32.c
25604 +++ b/arch/x86/mm/iomap_32.c
25605 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25606 debug_kmap_atomic(type);
25607 idx = type + KM_TYPE_NR * smp_processor_id();
25608 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25609 +
25610 + pax_open_kernel();
25611 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25612 + pax_close_kernel();
25613 +
25614 arch_flush_lazy_mmu_mode();
25615
25616 return (void *)vaddr;
25617 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25618 index 2feb9bd..ab91e7b 100644
25619 --- a/arch/x86/mm/ioremap.c
25620 +++ b/arch/x86/mm/ioremap.c
25621 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25622 * Second special case: Some BIOSen report the PC BIOS
25623 * area (640->1Mb) as ram even though it is not.
25624 */
25625 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25626 - pagenr < (BIOS_END >> PAGE_SHIFT))
25627 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25628 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25629 return 0;
25630
25631 for (i = 0; i < e820.nr_map; i++) {
25632 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25633 /*
25634 * Don't allow anybody to remap normal RAM that we're using..
25635 */
25636 - for (pfn = phys_addr >> PAGE_SHIFT;
25637 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25638 - pfn++) {
25639 -
25640 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25641 int is_ram = page_is_ram(pfn);
25642
25643 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25644 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25645 return NULL;
25646 WARN_ON_ONCE(is_ram);
25647 }
25648 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25649
25650 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25651 if (page_is_ram(start >> PAGE_SHIFT))
25652 +#ifdef CONFIG_HIGHMEM
25653 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25654 +#endif
25655 return __va(phys);
25656
25657 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25658 @@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25659 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25660
25661 static __initdata int after_paging_init;
25662 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25663 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25664
25665 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25666 {
25667 @@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25668 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25669
25670 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25671 - memset(bm_pte, 0, sizeof(bm_pte));
25672 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25673 + pmd_populate_user(&init_mm, pmd, bm_pte);
25674
25675 /*
25676 * The boot-ioremap range spans multiple pmds, for which
25677 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25678 index 8cc1833..1abbc5b 100644
25679 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25680 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25681 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25682 * memory (e.g. tracked pages)? For now, we need this to avoid
25683 * invoking kmemcheck for PnP BIOS calls.
25684 */
25685 - if (regs->flags & X86_VM_MASK)
25686 + if (v8086_mode(regs))
25687 return false;
25688 - if (regs->cs != __KERNEL_CS)
25689 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25690 return false;
25691
25692 pte = kmemcheck_pte_lookup(address);
25693 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25694 index c9e57af..07a321b 100644
25695 --- a/arch/x86/mm/mmap.c
25696 +++ b/arch/x86/mm/mmap.c
25697 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25698 * Leave an at least ~128 MB hole with possible stack randomization.
25699 */
25700 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25701 -#define MAX_GAP (TASK_SIZE/6*5)
25702 +#define MAX_GAP (pax_task_size/6*5)
25703
25704 /*
25705 * True on X86_32 or when emulating IA32 on X86_64
25706 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25707 return rnd << PAGE_SHIFT;
25708 }
25709
25710 -static unsigned long mmap_base(void)
25711 +static unsigned long mmap_base(struct mm_struct *mm)
25712 {
25713 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25714 + unsigned long pax_task_size = TASK_SIZE;
25715 +
25716 +#ifdef CONFIG_PAX_SEGMEXEC
25717 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25718 + pax_task_size = SEGMEXEC_TASK_SIZE;
25719 +#endif
25720
25721 if (gap < MIN_GAP)
25722 gap = MIN_GAP;
25723 else if (gap > MAX_GAP)
25724 gap = MAX_GAP;
25725
25726 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25727 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25728 }
25729
25730 /*
25731 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25732 * does, but not when emulating X86_32
25733 */
25734 -static unsigned long mmap_legacy_base(void)
25735 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25736 {
25737 - if (mmap_is_ia32())
25738 + if (mmap_is_ia32()) {
25739 +
25740 +#ifdef CONFIG_PAX_SEGMEXEC
25741 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25742 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25743 + else
25744 +#endif
25745 +
25746 return TASK_UNMAPPED_BASE;
25747 - else
25748 + } else
25749 return TASK_UNMAPPED_BASE + mmap_rnd();
25750 }
25751
25752 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25753 void arch_pick_mmap_layout(struct mm_struct *mm)
25754 {
25755 if (mmap_is_legacy()) {
25756 - mm->mmap_base = mmap_legacy_base();
25757 + mm->mmap_base = mmap_legacy_base(mm);
25758 +
25759 +#ifdef CONFIG_PAX_RANDMMAP
25760 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25761 + mm->mmap_base += mm->delta_mmap;
25762 +#endif
25763 +
25764 mm->get_unmapped_area = arch_get_unmapped_area;
25765 mm->unmap_area = arch_unmap_area;
25766 } else {
25767 - mm->mmap_base = mmap_base();
25768 + mm->mmap_base = mmap_base(mm);
25769 +
25770 +#ifdef CONFIG_PAX_RANDMMAP
25771 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25772 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25773 +#endif
25774 +
25775 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25776 mm->unmap_area = arch_unmap_area_topdown;
25777 }
25778 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25779 index 132772a..b961f11 100644
25780 --- a/arch/x86/mm/mmio-mod.c
25781 +++ b/arch/x86/mm/mmio-mod.c
25782 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25783 break;
25784 default:
25785 {
25786 - unsigned char *ip = (unsigned char *)instptr;
25787 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25788 my_trace->opcode = MMIO_UNKNOWN_OP;
25789 my_trace->width = 0;
25790 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25791 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25792 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25793 void __iomem *addr)
25794 {
25795 - static atomic_t next_id;
25796 + static atomic_unchecked_t next_id;
25797 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25798 /* These are page-unaligned. */
25799 struct mmiotrace_map map = {
25800 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25801 .private = trace
25802 },
25803 .phys = offset,
25804 - .id = atomic_inc_return(&next_id)
25805 + .id = atomic_inc_return_unchecked(&next_id)
25806 };
25807 map.map_id = trace->id;
25808
25809 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25810 index d253006..e56dd6a 100644
25811 --- a/arch/x86/mm/numa_32.c
25812 +++ b/arch/x86/mm/numa_32.c
25813 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25814 }
25815 #endif
25816
25817 -extern unsigned long find_max_low_pfn(void);
25818 extern unsigned long highend_pfn, highstart_pfn;
25819
25820 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25821 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25822 index e1d1069..2251ff3 100644
25823 --- a/arch/x86/mm/pageattr-test.c
25824 +++ b/arch/x86/mm/pageattr-test.c
25825 @@ -36,7 +36,7 @@ enum {
25826
25827 static int pte_testbit(pte_t pte)
25828 {
25829 - return pte_flags(pte) & _PAGE_UNUSED1;
25830 + return pte_flags(pte) & _PAGE_CPA_TEST;
25831 }
25832
25833 struct split_state {
25834 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25835 index dd38bfb..b72c63e 100644
25836 --- a/arch/x86/mm/pageattr.c
25837 +++ b/arch/x86/mm/pageattr.c
25838 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25839 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25840 */
25841 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25842 - pgprot_val(forbidden) |= _PAGE_NX;
25843 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25844
25845 /*
25846 * The kernel text needs to be executable for obvious reasons
25847 * Does not cover __inittext since that is gone later on. On
25848 * 64bit we do not enforce !NX on the low mapping
25849 */
25850 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25851 - pgprot_val(forbidden) |= _PAGE_NX;
25852 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25853 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25854
25855 +#ifdef CONFIG_DEBUG_RODATA
25856 /*
25857 * The .rodata section needs to be read-only. Using the pfn
25858 * catches all aliases.
25859 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25860 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25861 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25862 pgprot_val(forbidden) |= _PAGE_RW;
25863 +#endif
25864 +
25865 +#ifdef CONFIG_PAX_KERNEXEC
25866 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25867 + pgprot_val(forbidden) |= _PAGE_RW;
25868 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25869 + }
25870 +#endif
25871
25872 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25873
25874 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25875 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25876 {
25877 /* change init_mm */
25878 + pax_open_kernel();
25879 set_pte_atomic(kpte, pte);
25880 +
25881 #ifdef CONFIG_X86_32
25882 if (!SHARED_KERNEL_PMD) {
25883 +
25884 +#ifdef CONFIG_PAX_PER_CPU_PGD
25885 + unsigned long cpu;
25886 +#else
25887 struct page *page;
25888 +#endif
25889
25890 +#ifdef CONFIG_PAX_PER_CPU_PGD
25891 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25892 + pgd_t *pgd = get_cpu_pgd(cpu);
25893 +#else
25894 list_for_each_entry(page, &pgd_list, lru) {
25895 - pgd_t *pgd;
25896 + pgd_t *pgd = (pgd_t *)page_address(page);
25897 +#endif
25898 +
25899 pud_t *pud;
25900 pmd_t *pmd;
25901
25902 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25903 + pgd += pgd_index(address);
25904 pud = pud_offset(pgd, address);
25905 pmd = pmd_offset(pud, address);
25906 set_pte_atomic((pte_t *)pmd, pte);
25907 }
25908 }
25909 #endif
25910 + pax_close_kernel();
25911 }
25912
25913 static int
25914 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25915 index e78cd0e..de0a817 100644
25916 --- a/arch/x86/mm/pat.c
25917 +++ b/arch/x86/mm/pat.c
25918 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25919
25920 conflict:
25921 printk(KERN_INFO "%s:%d conflicting memory types "
25922 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25923 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25924 new->end, cattr_name(new->type), cattr_name(entry->type));
25925 return -EBUSY;
25926 }
25927 @@ -559,7 +559,7 @@ unlock_ret:
25928
25929 if (err) {
25930 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25931 - current->comm, current->pid, start, end);
25932 + current->comm, task_pid_nr(current), start, end);
25933 }
25934
25935 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25936 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25937 while (cursor < to) {
25938 if (!devmem_is_allowed(pfn)) {
25939 printk(KERN_INFO
25940 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25941 - current->comm, from, to);
25942 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25943 + current->comm, from, to, cursor);
25944 return 0;
25945 }
25946 cursor += PAGE_SIZE;
25947 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25948 printk(KERN_INFO
25949 "%s:%d ioremap_change_attr failed %s "
25950 "for %Lx-%Lx\n",
25951 - current->comm, current->pid,
25952 + current->comm, task_pid_nr(current),
25953 cattr_name(flags),
25954 base, (unsigned long long)(base + size));
25955 return -EINVAL;
25956 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25957 free_memtype(paddr, paddr + size);
25958 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25959 " for %Lx-%Lx, got %s\n",
25960 - current->comm, current->pid,
25961 + current->comm, task_pid_nr(current),
25962 cattr_name(want_flags),
25963 (unsigned long long)paddr,
25964 (unsigned long long)(paddr + size),
25965 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25966 index df3d5c8..c2223e1 100644
25967 --- a/arch/x86/mm/pf_in.c
25968 +++ b/arch/x86/mm/pf_in.c
25969 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25970 int i;
25971 enum reason_type rv = OTHERS;
25972
25973 - p = (unsigned char *)ins_addr;
25974 + p = (unsigned char *)ktla_ktva(ins_addr);
25975 p += skip_prefix(p, &prf);
25976 p += get_opcode(p, &opcode);
25977
25978 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25979 struct prefix_bits prf;
25980 int i;
25981
25982 - p = (unsigned char *)ins_addr;
25983 + p = (unsigned char *)ktla_ktva(ins_addr);
25984 p += skip_prefix(p, &prf);
25985 p += get_opcode(p, &opcode);
25986
25987 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25988 struct prefix_bits prf;
25989 int i;
25990
25991 - p = (unsigned char *)ins_addr;
25992 + p = (unsigned char *)ktla_ktva(ins_addr);
25993 p += skip_prefix(p, &prf);
25994 p += get_opcode(p, &opcode);
25995
25996 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25997 int i;
25998 unsigned long rv;
25999
26000 - p = (unsigned char *)ins_addr;
26001 + p = (unsigned char *)ktla_ktva(ins_addr);
26002 p += skip_prefix(p, &prf);
26003 p += get_opcode(p, &opcode);
26004 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
26005 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
26006 int i;
26007 unsigned long rv;
26008
26009 - p = (unsigned char *)ins_addr;
26010 + p = (unsigned char *)ktla_ktva(ins_addr);
26011 p += skip_prefix(p, &prf);
26012 p += get_opcode(p, &opcode);
26013 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
26014 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
26015 index e0e6fad..c56b495 100644
26016 --- a/arch/x86/mm/pgtable.c
26017 +++ b/arch/x86/mm/pgtable.c
26018 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
26019 list_del(&page->lru);
26020 }
26021
26022 -#define UNSHARED_PTRS_PER_PGD \
26023 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26024 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26025 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
26026
26027 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26028 +{
26029 + while (count--)
26030 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26031 +}
26032 +#endif
26033 +
26034 +#ifdef CONFIG_PAX_PER_CPU_PGD
26035 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26036 +{
26037 + while (count--)
26038 +
26039 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26040 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26041 +#else
26042 + *dst++ = *src++;
26043 +#endif
26044 +
26045 +}
26046 +#endif
26047 +
26048 +#ifdef CONFIG_X86_64
26049 +#define pxd_t pud_t
26050 +#define pyd_t pgd_t
26051 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26052 +#define pxd_free(mm, pud) pud_free((mm), (pud))
26053 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26054 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
26055 +#define PYD_SIZE PGDIR_SIZE
26056 +#else
26057 +#define pxd_t pmd_t
26058 +#define pyd_t pud_t
26059 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26060 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
26061 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26062 +#define pyd_offset(mm, address) pud_offset((mm), (address))
26063 +#define PYD_SIZE PUD_SIZE
26064 +#endif
26065 +
26066 +#ifdef CONFIG_PAX_PER_CPU_PGD
26067 +static inline void pgd_ctor(pgd_t *pgd) {}
26068 +static inline void pgd_dtor(pgd_t *pgd) {}
26069 +#else
26070 static void pgd_ctor(pgd_t *pgd)
26071 {
26072 /* If the pgd points to a shared pagetable level (either the
26073 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26074 pgd_list_del(pgd);
26075 spin_unlock_irqrestore(&pgd_lock, flags);
26076 }
26077 +#endif
26078
26079 /*
26080 * List of all pgd's needed for non-PAE so it can invalidate entries
26081 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26082 * -- wli
26083 */
26084
26085 -#ifdef CONFIG_X86_PAE
26086 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26087 /*
26088 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26089 * updating the top-level pagetable entries to guarantee the
26090 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26091 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26092 * and initialize the kernel pmds here.
26093 */
26094 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26095 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26096
26097 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26098 {
26099 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26100 */
26101 flush_tlb_mm(mm);
26102 }
26103 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26104 +#define PREALLOCATED_PXDS USER_PGD_PTRS
26105 #else /* !CONFIG_X86_PAE */
26106
26107 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26108 -#define PREALLOCATED_PMDS 0
26109 +#define PREALLOCATED_PXDS 0
26110
26111 #endif /* CONFIG_X86_PAE */
26112
26113 -static void free_pmds(pmd_t *pmds[])
26114 +static void free_pxds(pxd_t *pxds[])
26115 {
26116 int i;
26117
26118 - for(i = 0; i < PREALLOCATED_PMDS; i++)
26119 - if (pmds[i])
26120 - free_page((unsigned long)pmds[i]);
26121 + for(i = 0; i < PREALLOCATED_PXDS; i++)
26122 + if (pxds[i])
26123 + free_page((unsigned long)pxds[i]);
26124 }
26125
26126 -static int preallocate_pmds(pmd_t *pmds[])
26127 +static int preallocate_pxds(pxd_t *pxds[])
26128 {
26129 int i;
26130 bool failed = false;
26131
26132 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26133 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26134 - if (pmd == NULL)
26135 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26136 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26137 + if (pxd == NULL)
26138 failed = true;
26139 - pmds[i] = pmd;
26140 + pxds[i] = pxd;
26141 }
26142
26143 if (failed) {
26144 - free_pmds(pmds);
26145 + free_pxds(pxds);
26146 return -ENOMEM;
26147 }
26148
26149 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26150 * preallocate which never got a corresponding vma will need to be
26151 * freed manually.
26152 */
26153 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26154 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26155 {
26156 int i;
26157
26158 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26159 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26160 pgd_t pgd = pgdp[i];
26161
26162 if (pgd_val(pgd) != 0) {
26163 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26164 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26165
26166 - pgdp[i] = native_make_pgd(0);
26167 + set_pgd(pgdp + i, native_make_pgd(0));
26168
26169 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26170 - pmd_free(mm, pmd);
26171 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26172 + pxd_free(mm, pxd);
26173 }
26174 }
26175 }
26176
26177 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26178 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26179 {
26180 - pud_t *pud;
26181 + pyd_t *pyd;
26182 unsigned long addr;
26183 int i;
26184
26185 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26186 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26187 return;
26188
26189 - pud = pud_offset(pgd, 0);
26190 +#ifdef CONFIG_X86_64
26191 + pyd = pyd_offset(mm, 0L);
26192 +#else
26193 + pyd = pyd_offset(pgd, 0L);
26194 +#endif
26195
26196 - for (addr = i = 0; i < PREALLOCATED_PMDS;
26197 - i++, pud++, addr += PUD_SIZE) {
26198 - pmd_t *pmd = pmds[i];
26199 + for (addr = i = 0; i < PREALLOCATED_PXDS;
26200 + i++, pyd++, addr += PYD_SIZE) {
26201 + pxd_t *pxd = pxds[i];
26202
26203 if (i >= KERNEL_PGD_BOUNDARY)
26204 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26205 - sizeof(pmd_t) * PTRS_PER_PMD);
26206 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26207 + sizeof(pxd_t) * PTRS_PER_PMD);
26208
26209 - pud_populate(mm, pud, pmd);
26210 + pyd_populate(mm, pyd, pxd);
26211 }
26212 }
26213
26214 pgd_t *pgd_alloc(struct mm_struct *mm)
26215 {
26216 pgd_t *pgd;
26217 - pmd_t *pmds[PREALLOCATED_PMDS];
26218 + pxd_t *pxds[PREALLOCATED_PXDS];
26219 +
26220 unsigned long flags;
26221
26222 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26223 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26224
26225 mm->pgd = pgd;
26226
26227 - if (preallocate_pmds(pmds) != 0)
26228 + if (preallocate_pxds(pxds) != 0)
26229 goto out_free_pgd;
26230
26231 if (paravirt_pgd_alloc(mm) != 0)
26232 - goto out_free_pmds;
26233 + goto out_free_pxds;
26234
26235 /*
26236 * Make sure that pre-populating the pmds is atomic with
26237 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26238 spin_lock_irqsave(&pgd_lock, flags);
26239
26240 pgd_ctor(pgd);
26241 - pgd_prepopulate_pmd(mm, pgd, pmds);
26242 + pgd_prepopulate_pxd(mm, pgd, pxds);
26243
26244 spin_unlock_irqrestore(&pgd_lock, flags);
26245
26246 return pgd;
26247
26248 -out_free_pmds:
26249 - free_pmds(pmds);
26250 +out_free_pxds:
26251 + free_pxds(pxds);
26252 out_free_pgd:
26253 free_page((unsigned long)pgd);
26254 out:
26255 @@ -287,7 +338,7 @@ out:
26256
26257 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26258 {
26259 - pgd_mop_up_pmds(mm, pgd);
26260 + pgd_mop_up_pxds(mm, pgd);
26261 pgd_dtor(pgd);
26262 paravirt_pgd_free(mm, pgd);
26263 free_page((unsigned long)pgd);
26264 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26265 index 46c8834..fcab43d 100644
26266 --- a/arch/x86/mm/pgtable_32.c
26267 +++ b/arch/x86/mm/pgtable_32.c
26268 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26269 return;
26270 }
26271 pte = pte_offset_kernel(pmd, vaddr);
26272 +
26273 + pax_open_kernel();
26274 if (pte_val(pteval))
26275 set_pte_at(&init_mm, vaddr, pte, pteval);
26276 else
26277 pte_clear(&init_mm, vaddr, pte);
26278 + pax_close_kernel();
26279
26280 /*
26281 * It's enough to flush this one mapping.
26282 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26283 index 513d8ed..978c161 100644
26284 --- a/arch/x86/mm/setup_nx.c
26285 +++ b/arch/x86/mm/setup_nx.c
26286 @@ -4,11 +4,10 @@
26287
26288 #include <asm/pgtable.h>
26289
26290 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26291 int nx_enabled;
26292
26293 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26294 -static int disable_nx __cpuinitdata;
26295 -
26296 +#ifndef CONFIG_PAX_PAGEEXEC
26297 /*
26298 * noexec = on|off
26299 *
26300 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26301 if (!str)
26302 return -EINVAL;
26303 if (!strncmp(str, "on", 2)) {
26304 - __supported_pte_mask |= _PAGE_NX;
26305 - disable_nx = 0;
26306 + nx_enabled = 1;
26307 } else if (!strncmp(str, "off", 3)) {
26308 - disable_nx = 1;
26309 - __supported_pte_mask &= ~_PAGE_NX;
26310 + nx_enabled = 0;
26311 }
26312 return 0;
26313 }
26314 early_param("noexec", noexec_setup);
26315 #endif
26316 +#endif
26317
26318 #ifdef CONFIG_X86_PAE
26319 void __init set_nx(void)
26320 {
26321 - unsigned int v[4], l, h;
26322 + if (!nx_enabled && cpu_has_nx) {
26323 + unsigned l, h;
26324
26325 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26326 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26327 -
26328 - if ((v[3] & (1 << 20)) && !disable_nx) {
26329 - rdmsr(MSR_EFER, l, h);
26330 - l |= EFER_NX;
26331 - wrmsr(MSR_EFER, l, h);
26332 - nx_enabled = 1;
26333 - __supported_pte_mask |= _PAGE_NX;
26334 - }
26335 + __supported_pte_mask &= ~_PAGE_NX;
26336 + rdmsr(MSR_EFER, l, h);
26337 + l &= ~EFER_NX;
26338 + wrmsr(MSR_EFER, l, h);
26339 }
26340 }
26341 #else
26342 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26343 unsigned long efer;
26344
26345 rdmsrl(MSR_EFER, efer);
26346 - if (!(efer & EFER_NX) || disable_nx)
26347 + if (!(efer & EFER_NX) || !nx_enabled)
26348 __supported_pte_mask &= ~_PAGE_NX;
26349 }
26350 #endif
26351 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26352 index 36fe08e..b123d3a 100644
26353 --- a/arch/x86/mm/tlb.c
26354 +++ b/arch/x86/mm/tlb.c
26355 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26356 BUG();
26357 cpumask_clear_cpu(cpu,
26358 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26359 +
26360 +#ifndef CONFIG_PAX_PER_CPU_PGD
26361 load_cr3(swapper_pg_dir);
26362 +#endif
26363 +
26364 }
26365 EXPORT_SYMBOL_GPL(leave_mm);
26366
26367 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26368 index 829edf0..672adb3 100644
26369 --- a/arch/x86/oprofile/backtrace.c
26370 +++ b/arch/x86/oprofile/backtrace.c
26371 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26372 {
26373 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26374
26375 - if (!user_mode_vm(regs)) {
26376 + if (!user_mode(regs)) {
26377 unsigned long stack = kernel_stack_pointer(regs);
26378 if (depth)
26379 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26380 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26381 index e6a160a..36deff6 100644
26382 --- a/arch/x86/oprofile/op_model_p4.c
26383 +++ b/arch/x86/oprofile/op_model_p4.c
26384 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26385 #endif
26386 }
26387
26388 -static int inline addr_increment(void)
26389 +static inline int addr_increment(void)
26390 {
26391 #ifdef CONFIG_SMP
26392 return smp_num_siblings == 2 ? 2 : 1;
26393 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26394 index 1331fcf..03901b2 100644
26395 --- a/arch/x86/pci/common.c
26396 +++ b/arch/x86/pci/common.c
26397 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26398 int pcibios_last_bus = -1;
26399 unsigned long pirq_table_addr;
26400 struct pci_bus *pci_root_bus;
26401 -struct pci_raw_ops *raw_pci_ops;
26402 -struct pci_raw_ops *raw_pci_ext_ops;
26403 +const struct pci_raw_ops *raw_pci_ops;
26404 +const struct pci_raw_ops *raw_pci_ext_ops;
26405
26406 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26407 int reg, int len, u32 *val)
26408 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26409 index 347d882..4baf6b6 100644
26410 --- a/arch/x86/pci/direct.c
26411 +++ b/arch/x86/pci/direct.c
26412 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26413
26414 #undef PCI_CONF1_ADDRESS
26415
26416 -struct pci_raw_ops pci_direct_conf1 = {
26417 +const struct pci_raw_ops pci_direct_conf1 = {
26418 .read = pci_conf1_read,
26419 .write = pci_conf1_write,
26420 };
26421 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26422
26423 #undef PCI_CONF2_ADDRESS
26424
26425 -struct pci_raw_ops pci_direct_conf2 = {
26426 +const struct pci_raw_ops pci_direct_conf2 = {
26427 .read = pci_conf2_read,
26428 .write = pci_conf2_write,
26429 };
26430 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26431 * This should be close to trivial, but it isn't, because there are buggy
26432 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26433 */
26434 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26435 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26436 {
26437 u32 x = 0;
26438 int year, devfn;
26439 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26440 index f10a7e9..0425342 100644
26441 --- a/arch/x86/pci/mmconfig_32.c
26442 +++ b/arch/x86/pci/mmconfig_32.c
26443 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26444 return 0;
26445 }
26446
26447 -static struct pci_raw_ops pci_mmcfg = {
26448 +static const struct pci_raw_ops pci_mmcfg = {
26449 .read = pci_mmcfg_read,
26450 .write = pci_mmcfg_write,
26451 };
26452 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26453 index 94349f8..41600a7 100644
26454 --- a/arch/x86/pci/mmconfig_64.c
26455 +++ b/arch/x86/pci/mmconfig_64.c
26456 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26457 return 0;
26458 }
26459
26460 -static struct pci_raw_ops pci_mmcfg = {
26461 +static const struct pci_raw_ops pci_mmcfg = {
26462 .read = pci_mmcfg_read,
26463 .write = pci_mmcfg_write,
26464 };
26465 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26466 index 8eb295e..86bd657 100644
26467 --- a/arch/x86/pci/numaq_32.c
26468 +++ b/arch/x86/pci/numaq_32.c
26469 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26470
26471 #undef PCI_CONF1_MQ_ADDRESS
26472
26473 -static struct pci_raw_ops pci_direct_conf1_mq = {
26474 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26475 .read = pci_conf1_mq_read,
26476 .write = pci_conf1_mq_write
26477 };
26478 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26479 index b889d82..5a58a0a 100644
26480 --- a/arch/x86/pci/olpc.c
26481 +++ b/arch/x86/pci/olpc.c
26482 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26483 return 0;
26484 }
26485
26486 -static struct pci_raw_ops pci_olpc_conf = {
26487 +static const struct pci_raw_ops pci_olpc_conf = {
26488 .read = pci_olpc_read,
26489 .write = pci_olpc_write,
26490 };
26491 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26492 index 1c975cc..b8e16c2 100644
26493 --- a/arch/x86/pci/pcbios.c
26494 +++ b/arch/x86/pci/pcbios.c
26495 @@ -56,50 +56,93 @@ union bios32 {
26496 static struct {
26497 unsigned long address;
26498 unsigned short segment;
26499 -} bios32_indirect = { 0, __KERNEL_CS };
26500 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26501
26502 /*
26503 * Returns the entry point for the given service, NULL on error
26504 */
26505
26506 -static unsigned long bios32_service(unsigned long service)
26507 +static unsigned long __devinit bios32_service(unsigned long service)
26508 {
26509 unsigned char return_code; /* %al */
26510 unsigned long address; /* %ebx */
26511 unsigned long length; /* %ecx */
26512 unsigned long entry; /* %edx */
26513 unsigned long flags;
26514 + struct desc_struct d, *gdt;
26515
26516 local_irq_save(flags);
26517 - __asm__("lcall *(%%edi); cld"
26518 +
26519 + gdt = get_cpu_gdt_table(smp_processor_id());
26520 +
26521 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26522 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26523 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26524 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26525 +
26526 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26527 : "=a" (return_code),
26528 "=b" (address),
26529 "=c" (length),
26530 "=d" (entry)
26531 : "0" (service),
26532 "1" (0),
26533 - "D" (&bios32_indirect));
26534 + "D" (&bios32_indirect),
26535 + "r"(__PCIBIOS_DS)
26536 + : "memory");
26537 +
26538 + pax_open_kernel();
26539 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26540 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26541 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26542 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26543 + pax_close_kernel();
26544 +
26545 local_irq_restore(flags);
26546
26547 switch (return_code) {
26548 - case 0:
26549 - return address + entry;
26550 - case 0x80: /* Not present */
26551 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26552 - return 0;
26553 - default: /* Shouldn't happen */
26554 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26555 - service, return_code);
26556 + case 0: {
26557 + int cpu;
26558 + unsigned char flags;
26559 +
26560 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26561 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26562 + printk(KERN_WARNING "bios32_service: not valid\n");
26563 return 0;
26564 + }
26565 + address = address + PAGE_OFFSET;
26566 + length += 16UL; /* some BIOSs underreport this... */
26567 + flags = 4;
26568 + if (length >= 64*1024*1024) {
26569 + length >>= PAGE_SHIFT;
26570 + flags |= 8;
26571 + }
26572 +
26573 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26574 + gdt = get_cpu_gdt_table(cpu);
26575 + pack_descriptor(&d, address, length, 0x9b, flags);
26576 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26577 + pack_descriptor(&d, address, length, 0x93, flags);
26578 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26579 + }
26580 + return entry;
26581 + }
26582 + case 0x80: /* Not present */
26583 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26584 + return 0;
26585 + default: /* Shouldn't happen */
26586 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26587 + service, return_code);
26588 + return 0;
26589 }
26590 }
26591
26592 static struct {
26593 unsigned long address;
26594 unsigned short segment;
26595 -} pci_indirect = { 0, __KERNEL_CS };
26596 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26597
26598 -static int pci_bios_present;
26599 +static int pci_bios_present __read_only;
26600
26601 static int __devinit check_pcibios(void)
26602 {
26603 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26604 unsigned long flags, pcibios_entry;
26605
26606 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26607 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26608 + pci_indirect.address = pcibios_entry;
26609
26610 local_irq_save(flags);
26611 - __asm__(
26612 - "lcall *(%%edi); cld\n\t"
26613 + __asm__("movw %w6, %%ds\n\t"
26614 + "lcall *%%ss:(%%edi); cld\n\t"
26615 + "push %%ss\n\t"
26616 + "pop %%ds\n\t"
26617 "jc 1f\n\t"
26618 "xor %%ah, %%ah\n"
26619 "1:"
26620 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26621 "=b" (ebx),
26622 "=c" (ecx)
26623 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26624 - "D" (&pci_indirect)
26625 + "D" (&pci_indirect),
26626 + "r" (__PCIBIOS_DS)
26627 : "memory");
26628 local_irq_restore(flags);
26629
26630 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26631
26632 switch (len) {
26633 case 1:
26634 - __asm__("lcall *(%%esi); cld\n\t"
26635 + __asm__("movw %w6, %%ds\n\t"
26636 + "lcall *%%ss:(%%esi); cld\n\t"
26637 + "push %%ss\n\t"
26638 + "pop %%ds\n\t"
26639 "jc 1f\n\t"
26640 "xor %%ah, %%ah\n"
26641 "1:"
26642 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26643 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26644 "b" (bx),
26645 "D" ((long)reg),
26646 - "S" (&pci_indirect));
26647 + "S" (&pci_indirect),
26648 + "r" (__PCIBIOS_DS));
26649 /*
26650 * Zero-extend the result beyond 8 bits, do not trust the
26651 * BIOS having done it:
26652 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26653 *value &= 0xff;
26654 break;
26655 case 2:
26656 - __asm__("lcall *(%%esi); cld\n\t"
26657 + __asm__("movw %w6, %%ds\n\t"
26658 + "lcall *%%ss:(%%esi); cld\n\t"
26659 + "push %%ss\n\t"
26660 + "pop %%ds\n\t"
26661 "jc 1f\n\t"
26662 "xor %%ah, %%ah\n"
26663 "1:"
26664 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26665 : "1" (PCIBIOS_READ_CONFIG_WORD),
26666 "b" (bx),
26667 "D" ((long)reg),
26668 - "S" (&pci_indirect));
26669 + "S" (&pci_indirect),
26670 + "r" (__PCIBIOS_DS));
26671 /*
26672 * Zero-extend the result beyond 16 bits, do not trust the
26673 * BIOS having done it:
26674 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26675 *value &= 0xffff;
26676 break;
26677 case 4:
26678 - __asm__("lcall *(%%esi); cld\n\t"
26679 + __asm__("movw %w6, %%ds\n\t"
26680 + "lcall *%%ss:(%%esi); cld\n\t"
26681 + "push %%ss\n\t"
26682 + "pop %%ds\n\t"
26683 "jc 1f\n\t"
26684 "xor %%ah, %%ah\n"
26685 "1:"
26686 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26687 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26688 "b" (bx),
26689 "D" ((long)reg),
26690 - "S" (&pci_indirect));
26691 + "S" (&pci_indirect),
26692 + "r" (__PCIBIOS_DS));
26693 break;
26694 }
26695
26696 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26697
26698 switch (len) {
26699 case 1:
26700 - __asm__("lcall *(%%esi); cld\n\t"
26701 + __asm__("movw %w6, %%ds\n\t"
26702 + "lcall *%%ss:(%%esi); cld\n\t"
26703 + "push %%ss\n\t"
26704 + "pop %%ds\n\t"
26705 "jc 1f\n\t"
26706 "xor %%ah, %%ah\n"
26707 "1:"
26708 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26709 "c" (value),
26710 "b" (bx),
26711 "D" ((long)reg),
26712 - "S" (&pci_indirect));
26713 + "S" (&pci_indirect),
26714 + "r" (__PCIBIOS_DS));
26715 break;
26716 case 2:
26717 - __asm__("lcall *(%%esi); cld\n\t"
26718 + __asm__("movw %w6, %%ds\n\t"
26719 + "lcall *%%ss:(%%esi); cld\n\t"
26720 + "push %%ss\n\t"
26721 + "pop %%ds\n\t"
26722 "jc 1f\n\t"
26723 "xor %%ah, %%ah\n"
26724 "1:"
26725 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26726 "c" (value),
26727 "b" (bx),
26728 "D" ((long)reg),
26729 - "S" (&pci_indirect));
26730 + "S" (&pci_indirect),
26731 + "r" (__PCIBIOS_DS));
26732 break;
26733 case 4:
26734 - __asm__("lcall *(%%esi); cld\n\t"
26735 + __asm__("movw %w6, %%ds\n\t"
26736 + "lcall *%%ss:(%%esi); cld\n\t"
26737 + "push %%ss\n\t"
26738 + "pop %%ds\n\t"
26739 "jc 1f\n\t"
26740 "xor %%ah, %%ah\n"
26741 "1:"
26742 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26743 "c" (value),
26744 "b" (bx),
26745 "D" ((long)reg),
26746 - "S" (&pci_indirect));
26747 + "S" (&pci_indirect),
26748 + "r" (__PCIBIOS_DS));
26749 break;
26750 }
26751
26752 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26753 * Function table for BIOS32 access
26754 */
26755
26756 -static struct pci_raw_ops pci_bios_access = {
26757 +static const struct pci_raw_ops pci_bios_access = {
26758 .read = pci_bios_read,
26759 .write = pci_bios_write
26760 };
26761 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26762 * Try to find PCI BIOS.
26763 */
26764
26765 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26766 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26767 {
26768 union bios32 *check;
26769 unsigned char sum;
26770 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26771
26772 DBG("PCI: Fetching IRQ routing table... ");
26773 __asm__("push %%es\n\t"
26774 + "movw %w8, %%ds\n\t"
26775 "push %%ds\n\t"
26776 "pop %%es\n\t"
26777 - "lcall *(%%esi); cld\n\t"
26778 + "lcall *%%ss:(%%esi); cld\n\t"
26779 "pop %%es\n\t"
26780 + "push %%ss\n\t"
26781 + "pop %%ds\n"
26782 "jc 1f\n\t"
26783 "xor %%ah, %%ah\n"
26784 "1:"
26785 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26786 "1" (0),
26787 "D" ((long) &opt),
26788 "S" (&pci_indirect),
26789 - "m" (opt)
26790 + "m" (opt),
26791 + "r" (__PCIBIOS_DS)
26792 : "memory");
26793 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26794 if (ret & 0xff00)
26795 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26796 {
26797 int ret;
26798
26799 - __asm__("lcall *(%%esi); cld\n\t"
26800 + __asm__("movw %w5, %%ds\n\t"
26801 + "lcall *%%ss:(%%esi); cld\n\t"
26802 + "push %%ss\n\t"
26803 + "pop %%ds\n"
26804 "jc 1f\n\t"
26805 "xor %%ah, %%ah\n"
26806 "1:"
26807 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26808 : "0" (PCIBIOS_SET_PCI_HW_INT),
26809 "b" ((dev->bus->number << 8) | dev->devfn),
26810 "c" ((irq << 8) | (pin + 10)),
26811 - "S" (&pci_indirect));
26812 + "S" (&pci_indirect),
26813 + "r" (__PCIBIOS_DS));
26814 return !(ret & 0xff00);
26815 }
26816 EXPORT_SYMBOL(pcibios_set_irq_routing);
26817 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26818 index fa0f651..9d8f3d9 100644
26819 --- a/arch/x86/power/cpu.c
26820 +++ b/arch/x86/power/cpu.c
26821 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26822 static void fix_processor_context(void)
26823 {
26824 int cpu = smp_processor_id();
26825 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26826 + struct tss_struct *t = init_tss + cpu;
26827
26828 set_tss_desc(cpu, t); /*
26829 * This just modifies memory; should not be
26830 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26831 */
26832
26833 #ifdef CONFIG_X86_64
26834 + pax_open_kernel();
26835 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26836 + pax_close_kernel();
26837
26838 syscall_init(); /* This sets MSR_*STAR and related */
26839 #endif
26840 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26841 index dd78ef6..f9d928d 100644
26842 --- a/arch/x86/vdso/Makefile
26843 +++ b/arch/x86/vdso/Makefile
26844 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26845 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26846 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26847
26848 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26849 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26850 GCOV_PROFILE := n
26851
26852 #
26853 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26854 index ee55754..0013b2e 100644
26855 --- a/arch/x86/vdso/vclock_gettime.c
26856 +++ b/arch/x86/vdso/vclock_gettime.c
26857 @@ -22,24 +22,48 @@
26858 #include <asm/hpet.h>
26859 #include <asm/unistd.h>
26860 #include <asm/io.h>
26861 +#include <asm/fixmap.h>
26862 #include "vextern.h"
26863
26864 #define gtod vdso_vsyscall_gtod_data
26865
26866 +notrace noinline long __vdso_fallback_time(long *t)
26867 +{
26868 + long secs;
26869 + asm volatile("syscall"
26870 + : "=a" (secs)
26871 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26872 + return secs;
26873 +}
26874 +
26875 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26876 {
26877 long ret;
26878 asm("syscall" : "=a" (ret) :
26879 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26880 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26881 return ret;
26882 }
26883
26884 +notrace static inline cycle_t __vdso_vread_hpet(void)
26885 +{
26886 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26887 +}
26888 +
26889 +notrace static inline cycle_t __vdso_vread_tsc(void)
26890 +{
26891 + cycle_t ret = (cycle_t)vget_cycles();
26892 +
26893 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26894 +}
26895 +
26896 notrace static inline long vgetns(void)
26897 {
26898 long v;
26899 - cycles_t (*vread)(void);
26900 - vread = gtod->clock.vread;
26901 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26902 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26903 + v = __vdso_vread_tsc();
26904 + else
26905 + v = __vdso_vread_hpet();
26906 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26907 return (v * gtod->clock.mult) >> gtod->clock.shift;
26908 }
26909
26910 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26911
26912 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26913 {
26914 - if (likely(gtod->sysctl_enabled))
26915 + if (likely(gtod->sysctl_enabled &&
26916 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26917 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26918 switch (clock) {
26919 case CLOCK_REALTIME:
26920 if (likely(gtod->clock.vread))
26921 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26922 int clock_gettime(clockid_t, struct timespec *)
26923 __attribute__((weak, alias("__vdso_clock_gettime")));
26924
26925 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26926 +{
26927 + long ret;
26928 + asm("syscall" : "=a" (ret) :
26929 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26930 + return ret;
26931 +}
26932 +
26933 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26934 {
26935 - long ret;
26936 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26937 + if (likely(gtod->sysctl_enabled &&
26938 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26939 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26940 + {
26941 if (likely(tv != NULL)) {
26942 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26943 offsetof(struct timespec, tv_nsec) ||
26944 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26945 }
26946 return 0;
26947 }
26948 - asm("syscall" : "=a" (ret) :
26949 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26950 - return ret;
26951 + return __vdso_fallback_gettimeofday(tv, tz);
26952 }
26953 int gettimeofday(struct timeval *, struct timezone *)
26954 __attribute__((weak, alias("__vdso_gettimeofday")));
26955 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26956 index 4e5dd3b..00ba15e 100644
26957 --- a/arch/x86/vdso/vdso.lds.S
26958 +++ b/arch/x86/vdso/vdso.lds.S
26959 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26960 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26961 #include "vextern.h"
26962 #undef VEXTERN
26963 +
26964 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26965 +VEXTERN(fallback_gettimeofday)
26966 +VEXTERN(fallback_time)
26967 +VEXTERN(getcpu)
26968 +#undef VEXTERN
26969 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26970 index 58bc00f..d53fb48 100644
26971 --- a/arch/x86/vdso/vdso32-setup.c
26972 +++ b/arch/x86/vdso/vdso32-setup.c
26973 @@ -25,6 +25,7 @@
26974 #include <asm/tlbflush.h>
26975 #include <asm/vdso.h>
26976 #include <asm/proto.h>
26977 +#include <asm/mman.h>
26978
26979 enum {
26980 VDSO_DISABLED = 0,
26981 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26982 void enable_sep_cpu(void)
26983 {
26984 int cpu = get_cpu();
26985 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26986 + struct tss_struct *tss = init_tss + cpu;
26987
26988 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26989 put_cpu();
26990 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26991 gate_vma.vm_start = FIXADDR_USER_START;
26992 gate_vma.vm_end = FIXADDR_USER_END;
26993 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26994 - gate_vma.vm_page_prot = __P101;
26995 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26996 /*
26997 * Make sure the vDSO gets into every core dump.
26998 * Dumping its contents makes post-mortem fully interpretable later
26999 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27000 if (compat)
27001 addr = VDSO_HIGH_BASE;
27002 else {
27003 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27004 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27005 if (IS_ERR_VALUE(addr)) {
27006 ret = addr;
27007 goto up_fail;
27008 }
27009 }
27010
27011 - current->mm->context.vdso = (void *)addr;
27012 + current->mm->context.vdso = addr;
27013
27014 if (compat_uses_vma || !compat) {
27015 /*
27016 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27017 }
27018
27019 current_thread_info()->sysenter_return =
27020 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27021 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27022
27023 up_fail:
27024 if (ret)
27025 - current->mm->context.vdso = NULL;
27026 + current->mm->context.vdso = 0;
27027
27028 up_write(&mm->mmap_sem);
27029
27030 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27031
27032 const char *arch_vma_name(struct vm_area_struct *vma)
27033 {
27034 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27035 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27036 return "[vdso]";
27037 +
27038 +#ifdef CONFIG_PAX_SEGMEXEC
27039 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27040 + return "[vdso]";
27041 +#endif
27042 +
27043 return NULL;
27044 }
27045
27046 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27047 struct mm_struct *mm = tsk->mm;
27048
27049 /* Check to see if this task was created in compat vdso mode */
27050 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27051 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27052 return &gate_vma;
27053 return NULL;
27054 }
27055 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27056 index 1683ba2..48d07f3 100644
27057 --- a/arch/x86/vdso/vextern.h
27058 +++ b/arch/x86/vdso/vextern.h
27059 @@ -11,6 +11,5 @@
27060 put into vextern.h and be referenced as a pointer with vdso prefix.
27061 The main kernel later fills in the values. */
27062
27063 -VEXTERN(jiffies)
27064 VEXTERN(vgetcpu_mode)
27065 VEXTERN(vsyscall_gtod_data)
27066 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27067 index 21e1aeb..2c0b3c4 100644
27068 --- a/arch/x86/vdso/vma.c
27069 +++ b/arch/x86/vdso/vma.c
27070 @@ -17,8 +17,6 @@
27071 #include "vextern.h" /* Just for VMAGIC. */
27072 #undef VEXTERN
27073
27074 -unsigned int __read_mostly vdso_enabled = 1;
27075 -
27076 extern char vdso_start[], vdso_end[];
27077 extern unsigned short vdso_sync_cpuid;
27078
27079 @@ -27,10 +25,8 @@ static unsigned vdso_size;
27080
27081 static inline void *var_ref(void *p, char *name)
27082 {
27083 - if (*(void **)p != (void *)VMAGIC) {
27084 - printk("VDSO: variable %s broken\n", name);
27085 - vdso_enabled = 0;
27086 - }
27087 + if (*(void **)p != (void *)VMAGIC)
27088 + panic("VDSO: variable %s broken\n", name);
27089 return p;
27090 }
27091
27092 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27093 if (!vbase)
27094 goto oom;
27095
27096 - if (memcmp(vbase, "\177ELF", 4)) {
27097 - printk("VDSO: I'm broken; not ELF\n");
27098 - vdso_enabled = 0;
27099 - }
27100 + if (memcmp(vbase, ELFMAG, SELFMAG))
27101 + panic("VDSO: I'm broken; not ELF\n");
27102
27103 #define VEXTERN(x) \
27104 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27105 #include "vextern.h"
27106 #undef VEXTERN
27107 + vunmap(vbase);
27108 return 0;
27109
27110 oom:
27111 - printk("Cannot allocate vdso\n");
27112 - vdso_enabled = 0;
27113 - return -ENOMEM;
27114 + panic("Cannot allocate vdso\n");
27115 }
27116 __initcall(init_vdso_vars);
27117
27118 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27119 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27120 {
27121 struct mm_struct *mm = current->mm;
27122 - unsigned long addr;
27123 + unsigned long addr = 0;
27124 int ret;
27125
27126 - if (!vdso_enabled)
27127 - return 0;
27128 -
27129 down_write(&mm->mmap_sem);
27130 +
27131 +#ifdef CONFIG_PAX_RANDMMAP
27132 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27133 +#endif
27134 +
27135 addr = vdso_addr(mm->start_stack, vdso_size);
27136 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27137 if (IS_ERR_VALUE(addr)) {
27138 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27139 goto up_fail;
27140 }
27141
27142 - current->mm->context.vdso = (void *)addr;
27143 + current->mm->context.vdso = addr;
27144
27145 ret = install_special_mapping(mm, addr, vdso_size,
27146 VM_READ|VM_EXEC|
27147 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27148 VM_ALWAYSDUMP,
27149 vdso_pages);
27150 if (ret) {
27151 - current->mm->context.vdso = NULL;
27152 + current->mm->context.vdso = 0;
27153 goto up_fail;
27154 }
27155
27156 @@ -132,10 +127,3 @@ up_fail:
27157 up_write(&mm->mmap_sem);
27158 return ret;
27159 }
27160 -
27161 -static __init int vdso_setup(char *s)
27162 -{
27163 - vdso_enabled = simple_strtoul(s, NULL, 0);
27164 - return 0;
27165 -}
27166 -__setup("vdso=", vdso_setup);
27167 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27168 index 0087b00..eecb34f 100644
27169 --- a/arch/x86/xen/enlighten.c
27170 +++ b/arch/x86/xen/enlighten.c
27171 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27172
27173 struct shared_info xen_dummy_shared_info;
27174
27175 -void *xen_initial_gdt;
27176 -
27177 /*
27178 * Point at some empty memory to start with. We map the real shared_info
27179 * page as soon as fixmap is up and running.
27180 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27181
27182 preempt_disable();
27183
27184 - start = __get_cpu_var(idt_desc).address;
27185 + start = (unsigned long)__get_cpu_var(idt_desc).address;
27186 end = start + __get_cpu_var(idt_desc).size + 1;
27187
27188 xen_mc_flush();
27189 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27190 #endif
27191 };
27192
27193 -static void xen_reboot(int reason)
27194 +static __noreturn void xen_reboot(int reason)
27195 {
27196 struct sched_shutdown r = { .reason = reason };
27197
27198 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27199 BUG();
27200 }
27201
27202 -static void xen_restart(char *msg)
27203 +static __noreturn void xen_restart(char *msg)
27204 {
27205 xen_reboot(SHUTDOWN_reboot);
27206 }
27207
27208 -static void xen_emergency_restart(void)
27209 +static __noreturn void xen_emergency_restart(void)
27210 {
27211 xen_reboot(SHUTDOWN_reboot);
27212 }
27213
27214 -static void xen_machine_halt(void)
27215 +static __noreturn void xen_machine_halt(void)
27216 {
27217 xen_reboot(SHUTDOWN_poweroff);
27218 }
27219 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27220 */
27221 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27222
27223 -#ifdef CONFIG_X86_64
27224 /* Work out if we support NX */
27225 - check_efer();
27226 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27227 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27228 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27229 + unsigned l, h;
27230 +
27231 +#ifdef CONFIG_X86_PAE
27232 + nx_enabled = 1;
27233 +#endif
27234 + __supported_pte_mask |= _PAGE_NX;
27235 + rdmsr(MSR_EFER, l, h);
27236 + l |= EFER_NX;
27237 + wrmsr(MSR_EFER, l, h);
27238 + }
27239 #endif
27240
27241 xen_setup_features();
27242 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27243
27244 machine_ops = xen_machine_ops;
27245
27246 - /*
27247 - * The only reliable way to retain the initial address of the
27248 - * percpu gdt_page is to remember it here, so we can go and
27249 - * mark it RW later, when the initial percpu area is freed.
27250 - */
27251 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27252 -
27253 xen_smp_init();
27254
27255 pgd = (pgd_t *)xen_start_info->pt_base;
27256 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27257 index 3f90a2c..2c2ad84 100644
27258 --- a/arch/x86/xen/mmu.c
27259 +++ b/arch/x86/xen/mmu.c
27260 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27261 convert_pfn_mfn(init_level4_pgt);
27262 convert_pfn_mfn(level3_ident_pgt);
27263 convert_pfn_mfn(level3_kernel_pgt);
27264 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27265 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27266 + convert_pfn_mfn(level3_vmemmap_pgt);
27267
27268 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27269 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27270 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27271 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27272 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27273 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27274 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27275 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27276 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27277 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27278 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27279 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27280 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27281
27282 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27283 pv_mmu_ops.set_pud = xen_set_pud;
27284 #if PAGETABLE_LEVELS == 4
27285 pv_mmu_ops.set_pgd = xen_set_pgd;
27286 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27287 #endif
27288
27289 /* This will work as long as patching hasn't happened yet
27290 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27291 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27292 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27293 .set_pgd = xen_set_pgd_hyper,
27294 + .set_pgd_batched = xen_set_pgd_hyper,
27295
27296 .alloc_pud = xen_alloc_pmd_init,
27297 .release_pud = xen_release_pmd_init,
27298 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27299 index a96204a..fca9b8e 100644
27300 --- a/arch/x86/xen/smp.c
27301 +++ b/arch/x86/xen/smp.c
27302 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27303 {
27304 BUG_ON(smp_processor_id() != 0);
27305 native_smp_prepare_boot_cpu();
27306 -
27307 - /* We've switched to the "real" per-cpu gdt, so make sure the
27308 - old memory can be recycled */
27309 - make_lowmem_page_readwrite(xen_initial_gdt);
27310 -
27311 xen_setup_vcpu_info_placement();
27312 }
27313
27314 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27315 gdt = get_cpu_gdt_table(cpu);
27316
27317 ctxt->flags = VGCF_IN_KERNEL;
27318 - ctxt->user_regs.ds = __USER_DS;
27319 - ctxt->user_regs.es = __USER_DS;
27320 + ctxt->user_regs.ds = __KERNEL_DS;
27321 + ctxt->user_regs.es = __KERNEL_DS;
27322 ctxt->user_regs.ss = __KERNEL_DS;
27323 #ifdef CONFIG_X86_32
27324 ctxt->user_regs.fs = __KERNEL_PERCPU;
27325 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27326 + savesegment(gs, ctxt->user_regs.gs);
27327 #else
27328 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27329 #endif
27330 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27331 int rc;
27332
27333 per_cpu(current_task, cpu) = idle;
27334 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27335 #ifdef CONFIG_X86_32
27336 irq_ctx_init(cpu);
27337 #else
27338 clear_tsk_thread_flag(idle, TIF_FORK);
27339 - per_cpu(kernel_stack, cpu) =
27340 - (unsigned long)task_stack_page(idle) -
27341 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27342 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27343 #endif
27344 xen_setup_runstate_info(cpu);
27345 xen_setup_timer(cpu);
27346 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27347 index 9a95a9c..4f39e774 100644
27348 --- a/arch/x86/xen/xen-asm_32.S
27349 +++ b/arch/x86/xen/xen-asm_32.S
27350 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27351 ESP_OFFSET=4 # bytes pushed onto stack
27352
27353 /*
27354 - * Store vcpu_info pointer for easy access. Do it this way to
27355 - * avoid having to reload %fs
27356 + * Store vcpu_info pointer for easy access.
27357 */
27358 #ifdef CONFIG_SMP
27359 - GET_THREAD_INFO(%eax)
27360 - movl TI_cpu(%eax), %eax
27361 - movl __per_cpu_offset(,%eax,4), %eax
27362 - mov per_cpu__xen_vcpu(%eax), %eax
27363 + push %fs
27364 + mov $(__KERNEL_PERCPU), %eax
27365 + mov %eax, %fs
27366 + mov PER_CPU_VAR(xen_vcpu), %eax
27367 + pop %fs
27368 #else
27369 movl per_cpu__xen_vcpu, %eax
27370 #endif
27371 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27372 index 1a5ff24..a187d40 100644
27373 --- a/arch/x86/xen/xen-head.S
27374 +++ b/arch/x86/xen/xen-head.S
27375 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27376 #ifdef CONFIG_X86_32
27377 mov %esi,xen_start_info
27378 mov $init_thread_union+THREAD_SIZE,%esp
27379 +#ifdef CONFIG_SMP
27380 + movl $cpu_gdt_table,%edi
27381 + movl $__per_cpu_load,%eax
27382 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27383 + rorl $16,%eax
27384 + movb %al,__KERNEL_PERCPU + 4(%edi)
27385 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27386 + movl $__per_cpu_end - 1,%eax
27387 + subl $__per_cpu_start,%eax
27388 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27389 +#endif
27390 #else
27391 mov %rsi,xen_start_info
27392 mov $init_thread_union+THREAD_SIZE,%rsp
27393 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27394 index f9153a3..51eab3d 100644
27395 --- a/arch/x86/xen/xen-ops.h
27396 +++ b/arch/x86/xen/xen-ops.h
27397 @@ -10,8 +10,6 @@
27398 extern const char xen_hypervisor_callback[];
27399 extern const char xen_failsafe_callback[];
27400
27401 -extern void *xen_initial_gdt;
27402 -
27403 struct trap_info;
27404 void xen_copy_trap_info(struct trap_info *traps);
27405
27406 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27407 index 15c6308..96e83c2 100644
27408 --- a/block/blk-integrity.c
27409 +++ b/block/blk-integrity.c
27410 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27411 NULL,
27412 };
27413
27414 -static struct sysfs_ops integrity_ops = {
27415 +static const struct sysfs_ops integrity_ops = {
27416 .show = &integrity_attr_show,
27417 .store = &integrity_attr_store,
27418 };
27419 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
27420 index d4ed600..cbdabb0 100644
27421 --- a/block/blk-ioc.c
27422 +++ b/block/blk-ioc.c
27423 @@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
27424 }
27425
27426 /* Called by the exitting task */
27427 -void exit_io_context(void)
27428 +void exit_io_context(struct task_struct *task)
27429 {
27430 struct io_context *ioc;
27431
27432 - task_lock(current);
27433 - ioc = current->io_context;
27434 - current->io_context = NULL;
27435 - task_unlock(current);
27436 + task_lock(task);
27437 + ioc = task->io_context;
27438 + task->io_context = NULL;
27439 + task_unlock(task);
27440
27441 if (atomic_dec_and_test(&ioc->nr_tasks)) {
27442 if (ioc->aic && ioc->aic->exit)
27443 ioc->aic->exit(ioc->aic);
27444 cfq_exit(ioc);
27445
27446 - put_io_context(ioc);
27447 }
27448 + put_io_context(ioc);
27449 }
27450
27451 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
27452 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27453 index ca56420..f2fc409 100644
27454 --- a/block/blk-iopoll.c
27455 +++ b/block/blk-iopoll.c
27456 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27457 }
27458 EXPORT_SYMBOL(blk_iopoll_complete);
27459
27460 -static void blk_iopoll_softirq(struct softirq_action *h)
27461 +static void blk_iopoll_softirq(void)
27462 {
27463 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27464 int rearm = 0, budget = blk_iopoll_budget;
27465 diff --git a/block/blk-map.c b/block/blk-map.c
27466 index 30a7e51..0aeec6a 100644
27467 --- a/block/blk-map.c
27468 +++ b/block/blk-map.c
27469 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27470 * direct dma. else, set up kernel bounce buffers
27471 */
27472 uaddr = (unsigned long) ubuf;
27473 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27474 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27475 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27476 else
27477 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27478 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27479 for (i = 0; i < iov_count; i++) {
27480 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27481
27482 + if (!iov[i].iov_len)
27483 + return -EINVAL;
27484 +
27485 if (uaddr & queue_dma_alignment(q)) {
27486 unaligned = 1;
27487 break;
27488 }
27489 - if (!iov[i].iov_len)
27490 - return -EINVAL;
27491 }
27492
27493 if (unaligned || (q->dma_pad_mask & len) || map_data)
27494 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27495 if (!len || !kbuf)
27496 return -EINVAL;
27497
27498 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27499 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27500 if (do_copy)
27501 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27502 else
27503 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27504 index ee9c216..58d410a 100644
27505 --- a/block/blk-softirq.c
27506 +++ b/block/blk-softirq.c
27507 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27508 * Softirq action handler - move entries to local list and loop over them
27509 * while passing them to the queue registered handler.
27510 */
27511 -static void blk_done_softirq(struct softirq_action *h)
27512 +static void blk_done_softirq(void)
27513 {
27514 struct list_head *cpu_list, local_list;
27515
27516 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27517 index bb9c5ea..5330d48 100644
27518 --- a/block/blk-sysfs.c
27519 +++ b/block/blk-sysfs.c
27520 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27521 kmem_cache_free(blk_requestq_cachep, q);
27522 }
27523
27524 -static struct sysfs_ops queue_sysfs_ops = {
27525 +static const struct sysfs_ops queue_sysfs_ops = {
27526 .show = queue_attr_show,
27527 .store = queue_attr_store,
27528 };
27529 diff --git a/block/bsg.c b/block/bsg.c
27530 index 7154a7a..08ac2f0 100644
27531 --- a/block/bsg.c
27532 +++ b/block/bsg.c
27533 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27534 struct sg_io_v4 *hdr, struct bsg_device *bd,
27535 fmode_t has_write_perm)
27536 {
27537 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27538 + unsigned char *cmdptr;
27539 +
27540 if (hdr->request_len > BLK_MAX_CDB) {
27541 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27542 if (!rq->cmd)
27543 return -ENOMEM;
27544 - }
27545 + cmdptr = rq->cmd;
27546 + } else
27547 + cmdptr = tmpcmd;
27548
27549 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27550 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27551 hdr->request_len))
27552 return -EFAULT;
27553
27554 + if (cmdptr != rq->cmd)
27555 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27556 +
27557 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27558 if (blk_verify_command(rq->cmd, has_write_perm))
27559 return -EPERM;
27560 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27561 rq->next_rq = next_rq;
27562 next_rq->cmd_type = rq->cmd_type;
27563
27564 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27565 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27566 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27567 hdr->din_xfer_len, GFP_KERNEL);
27568 if (ret)
27569 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27570
27571 if (hdr->dout_xfer_len) {
27572 dxfer_len = hdr->dout_xfer_len;
27573 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27574 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27575 } else if (hdr->din_xfer_len) {
27576 dxfer_len = hdr->din_xfer_len;
27577 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27578 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27579 } else
27580 dxfer_len = 0;
27581
27582 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27583 int len = min_t(unsigned int, hdr->max_response_len,
27584 rq->sense_len);
27585
27586 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27587 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27588 rq->sense, len);
27589 if (!ret)
27590 hdr->response_len = len;
27591 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27592 index 9bd086c..ca1fc22 100644
27593 --- a/block/compat_ioctl.c
27594 +++ b/block/compat_ioctl.c
27595 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27596 err |= __get_user(f->spec1, &uf->spec1);
27597 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27598 err |= __get_user(name, &uf->name);
27599 - f->name = compat_ptr(name);
27600 + f->name = (void __force_kernel *)compat_ptr(name);
27601 if (err) {
27602 err = -EFAULT;
27603 goto out;
27604 diff --git a/block/elevator.c b/block/elevator.c
27605 index a847046..75a1746 100644
27606 --- a/block/elevator.c
27607 +++ b/block/elevator.c
27608 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27609 return error;
27610 }
27611
27612 -static struct sysfs_ops elv_sysfs_ops = {
27613 +static const struct sysfs_ops elv_sysfs_ops = {
27614 .show = elv_attr_show,
27615 .store = elv_attr_store,
27616 };
27617 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27618 index 2be0a97..bded3fd 100644
27619 --- a/block/scsi_ioctl.c
27620 +++ b/block/scsi_ioctl.c
27621 @@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27622 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27623 struct sg_io_hdr *hdr, fmode_t mode)
27624 {
27625 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27626 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27627 + unsigned char *cmdptr;
27628 +
27629 + if (rq->cmd != rq->__cmd)
27630 + cmdptr = rq->cmd;
27631 + else
27632 + cmdptr = tmpcmd;
27633 +
27634 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27635 return -EFAULT;
27636 +
27637 + if (cmdptr != rq->cmd)
27638 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27639 +
27640 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27641 return -EPERM;
27642
27643 @@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27644 int err;
27645 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27646 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27647 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27648 + unsigned char *cmdptr;
27649
27650 if (!sic)
27651 return -EINVAL;
27652 @@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27653 */
27654 err = -EFAULT;
27655 rq->cmd_len = cmdlen;
27656 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27657 +
27658 + if (rq->cmd != rq->__cmd)
27659 + cmdptr = rq->cmd;
27660 + else
27661 + cmdptr = tmpcmd;
27662 +
27663 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27664 goto error;
27665
27666 + if (rq->cmd != cmdptr)
27667 + memcpy(rq->cmd, cmdptr, cmdlen);
27668 +
27669 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27670 goto error;
27671
27672 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27673 index 3533582..f143117 100644
27674 --- a/crypto/cryptd.c
27675 +++ b/crypto/cryptd.c
27676 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27677
27678 struct cryptd_blkcipher_request_ctx {
27679 crypto_completion_t complete;
27680 -};
27681 +} __no_const;
27682
27683 struct cryptd_hash_ctx {
27684 struct crypto_shash *child;
27685 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27686 index a90d260..7a9765e 100644
27687 --- a/crypto/gf128mul.c
27688 +++ b/crypto/gf128mul.c
27689 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27690 for (i = 0; i < 7; ++i)
27691 gf128mul_x_lle(&p[i + 1], &p[i]);
27692
27693 - memset(r, 0, sizeof(r));
27694 + memset(r, 0, sizeof(*r));
27695 for (i = 0;;) {
27696 u8 ch = ((u8 *)b)[15 - i];
27697
27698 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27699 for (i = 0; i < 7; ++i)
27700 gf128mul_x_bbe(&p[i + 1], &p[i]);
27701
27702 - memset(r, 0, sizeof(r));
27703 + memset(r, 0, sizeof(*r));
27704 for (i = 0;;) {
27705 u8 ch = ((u8 *)b)[i];
27706
27707 diff --git a/crypto/serpent.c b/crypto/serpent.c
27708 index b651a55..023297d 100644
27709 --- a/crypto/serpent.c
27710 +++ b/crypto/serpent.c
27711 @@ -21,6 +21,7 @@
27712 #include <asm/byteorder.h>
27713 #include <linux/crypto.h>
27714 #include <linux/types.h>
27715 +#include <linux/sched.h>
27716
27717 /* Key is padded to the maximum of 256 bits before round key generation.
27718 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27719 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27720 u32 r0,r1,r2,r3,r4;
27721 int i;
27722
27723 + pax_track_stack();
27724 +
27725 /* Copy key, add padding */
27726
27727 for (i = 0; i < keylen; ++i)
27728 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27729 index 0d2cdb8..d8de48d 100644
27730 --- a/drivers/acpi/acpi_pad.c
27731 +++ b/drivers/acpi/acpi_pad.c
27732 @@ -30,7 +30,7 @@
27733 #include <acpi/acpi_bus.h>
27734 #include <acpi/acpi_drivers.h>
27735
27736 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27737 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27738 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27739 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27740 static DEFINE_MUTEX(isolated_cpus_lock);
27741 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27742 index 3f4602b..2e41d36 100644
27743 --- a/drivers/acpi/battery.c
27744 +++ b/drivers/acpi/battery.c
27745 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27746 }
27747
27748 static struct battery_file {
27749 - struct file_operations ops;
27750 + const struct file_operations ops;
27751 mode_t mode;
27752 const char *name;
27753 } acpi_battery_file[] = {
27754 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27755 index 7338b6a..82f0257 100644
27756 --- a/drivers/acpi/dock.c
27757 +++ b/drivers/acpi/dock.c
27758 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27759 struct list_head list;
27760 struct list_head hotplug_list;
27761 acpi_handle handle;
27762 - struct acpi_dock_ops *ops;
27763 + const struct acpi_dock_ops *ops;
27764 void *context;
27765 };
27766
27767 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27768 * the dock driver after _DCK is executed.
27769 */
27770 int
27771 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27772 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27773 void *context)
27774 {
27775 struct dock_dependent_device *dd;
27776 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27777 index 7c1c59e..2993595 100644
27778 --- a/drivers/acpi/osl.c
27779 +++ b/drivers/acpi/osl.c
27780 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27781 void __iomem *virt_addr;
27782
27783 virt_addr = ioremap(phys_addr, width);
27784 + if (!virt_addr)
27785 + return AE_NO_MEMORY;
27786 if (!value)
27787 value = &dummy;
27788
27789 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27790 void __iomem *virt_addr;
27791
27792 virt_addr = ioremap(phys_addr, width);
27793 + if (!virt_addr)
27794 + return AE_NO_MEMORY;
27795
27796 switch (width) {
27797 case 8:
27798 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27799 index c216062..eec10d2 100644
27800 --- a/drivers/acpi/power_meter.c
27801 +++ b/drivers/acpi/power_meter.c
27802 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27803 return res;
27804
27805 temp /= 1000;
27806 - if (temp < 0)
27807 - return -EINVAL;
27808
27809 mutex_lock(&resource->lock);
27810 resource->trip[attr->index - 7] = temp;
27811 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27812 index d0d25e2..961643d 100644
27813 --- a/drivers/acpi/proc.c
27814 +++ b/drivers/acpi/proc.c
27815 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27816 size_t count, loff_t * ppos)
27817 {
27818 struct list_head *node, *next;
27819 - char strbuf[5];
27820 - char str[5] = "";
27821 - unsigned int len = count;
27822 + char strbuf[5] = {0};
27823 struct acpi_device *found_dev = NULL;
27824
27825 - if (len > 4)
27826 - len = 4;
27827 - if (len < 0)
27828 - return -EFAULT;
27829 + if (count > 4)
27830 + count = 4;
27831
27832 - if (copy_from_user(strbuf, buffer, len))
27833 + if (copy_from_user(strbuf, buffer, count))
27834 return -EFAULT;
27835 - strbuf[len] = '\0';
27836 - sscanf(strbuf, "%s", str);
27837 + strbuf[count] = '\0';
27838
27839 mutex_lock(&acpi_device_lock);
27840 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27841 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27842 if (!dev->wakeup.flags.valid)
27843 continue;
27844
27845 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27846 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27847 dev->wakeup.state.enabled =
27848 dev->wakeup.state.enabled ? 0 : 1;
27849 found_dev = dev;
27850 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27851 index 7102474..de8ad22 100644
27852 --- a/drivers/acpi/processor_core.c
27853 +++ b/drivers/acpi/processor_core.c
27854 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27855 return 0;
27856 }
27857
27858 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27859 + BUG_ON(pr->id >= nr_cpu_ids);
27860
27861 /*
27862 * Buggy BIOS check
27863 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27864 index d933980..5761f13 100644
27865 --- a/drivers/acpi/sbshc.c
27866 +++ b/drivers/acpi/sbshc.c
27867 @@ -17,7 +17,7 @@
27868
27869 #define PREFIX "ACPI: "
27870
27871 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27872 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27873 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27874
27875 struct acpi_smb_hc {
27876 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27877 index 0458094..6978e7b 100644
27878 --- a/drivers/acpi/sleep.c
27879 +++ b/drivers/acpi/sleep.c
27880 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27881 }
27882 }
27883
27884 -static struct platform_suspend_ops acpi_suspend_ops = {
27885 +static const struct platform_suspend_ops acpi_suspend_ops = {
27886 .valid = acpi_suspend_state_valid,
27887 .begin = acpi_suspend_begin,
27888 .prepare_late = acpi_pm_prepare,
27889 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27890 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27891 * been requested.
27892 */
27893 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27894 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27895 .valid = acpi_suspend_state_valid,
27896 .begin = acpi_suspend_begin_old,
27897 .prepare_late = acpi_pm_disable_gpes,
27898 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27899 acpi_enable_all_runtime_gpes();
27900 }
27901
27902 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27903 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27904 .begin = acpi_hibernation_begin,
27905 .end = acpi_pm_end,
27906 .pre_snapshot = acpi_hibernation_pre_snapshot,
27907 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27908 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27909 * been requested.
27910 */
27911 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27912 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27913 .begin = acpi_hibernation_begin_old,
27914 .end = acpi_pm_end,
27915 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27916 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27917 index 05dff63..b662ab7 100644
27918 --- a/drivers/acpi/video.c
27919 +++ b/drivers/acpi/video.c
27920 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27921 vd->brightness->levels[request_level]);
27922 }
27923
27924 -static struct backlight_ops acpi_backlight_ops = {
27925 +static const struct backlight_ops acpi_backlight_ops = {
27926 .get_brightness = acpi_video_get_brightness,
27927 .update_status = acpi_video_set_brightness,
27928 };
27929 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27930 index 6787aab..23ffb0e 100644
27931 --- a/drivers/ata/ahci.c
27932 +++ b/drivers/ata/ahci.c
27933 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27934 .sdev_attrs = ahci_sdev_attrs,
27935 };
27936
27937 -static struct ata_port_operations ahci_ops = {
27938 +static const struct ata_port_operations ahci_ops = {
27939 .inherits = &sata_pmp_port_ops,
27940
27941 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27942 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27943 .port_stop = ahci_port_stop,
27944 };
27945
27946 -static struct ata_port_operations ahci_vt8251_ops = {
27947 +static const struct ata_port_operations ahci_vt8251_ops = {
27948 .inherits = &ahci_ops,
27949 .hardreset = ahci_vt8251_hardreset,
27950 };
27951
27952 -static struct ata_port_operations ahci_p5wdh_ops = {
27953 +static const struct ata_port_operations ahci_p5wdh_ops = {
27954 .inherits = &ahci_ops,
27955 .hardreset = ahci_p5wdh_hardreset,
27956 };
27957
27958 -static struct ata_port_operations ahci_sb600_ops = {
27959 +static const struct ata_port_operations ahci_sb600_ops = {
27960 .inherits = &ahci_ops,
27961 .softreset = ahci_sb600_softreset,
27962 .pmp_softreset = ahci_sb600_softreset,
27963 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27964 index 99e7196..4968c77 100644
27965 --- a/drivers/ata/ata_generic.c
27966 +++ b/drivers/ata/ata_generic.c
27967 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27968 ATA_BMDMA_SHT(DRV_NAME),
27969 };
27970
27971 -static struct ata_port_operations generic_port_ops = {
27972 +static const struct ata_port_operations generic_port_ops = {
27973 .inherits = &ata_bmdma_port_ops,
27974 .cable_detect = ata_cable_unknown,
27975 .set_mode = generic_set_mode,
27976 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27977 index c33591d..000c121 100644
27978 --- a/drivers/ata/ata_piix.c
27979 +++ b/drivers/ata/ata_piix.c
27980 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27981 ATA_BMDMA_SHT(DRV_NAME),
27982 };
27983
27984 -static struct ata_port_operations piix_pata_ops = {
27985 +static const struct ata_port_operations piix_pata_ops = {
27986 .inherits = &ata_bmdma32_port_ops,
27987 .cable_detect = ata_cable_40wire,
27988 .set_piomode = piix_set_piomode,
27989 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27990 .prereset = piix_pata_prereset,
27991 };
27992
27993 -static struct ata_port_operations piix_vmw_ops = {
27994 +static const struct ata_port_operations piix_vmw_ops = {
27995 .inherits = &piix_pata_ops,
27996 .bmdma_status = piix_vmw_bmdma_status,
27997 };
27998
27999 -static struct ata_port_operations ich_pata_ops = {
28000 +static const struct ata_port_operations ich_pata_ops = {
28001 .inherits = &piix_pata_ops,
28002 .cable_detect = ich_pata_cable_detect,
28003 .set_dmamode = ich_set_dmamode,
28004 };
28005
28006 -static struct ata_port_operations piix_sata_ops = {
28007 +static const struct ata_port_operations piix_sata_ops = {
28008 .inherits = &ata_bmdma_port_ops,
28009 };
28010
28011 -static struct ata_port_operations piix_sidpr_sata_ops = {
28012 +static const struct ata_port_operations piix_sidpr_sata_ops = {
28013 .inherits = &piix_sata_ops,
28014 .hardreset = sata_std_hardreset,
28015 .scr_read = piix_sidpr_scr_read,
28016 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
28017 index b0882cd..c295d65 100644
28018 --- a/drivers/ata/libata-acpi.c
28019 +++ b/drivers/ata/libata-acpi.c
28020 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
28021 ata_acpi_uevent(dev->link->ap, dev, event);
28022 }
28023
28024 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
28025 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
28026 .handler = ata_acpi_dev_notify_dock,
28027 .uevent = ata_acpi_dev_uevent,
28028 };
28029
28030 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28031 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28032 .handler = ata_acpi_ap_notify_dock,
28033 .uevent = ata_acpi_ap_uevent,
28034 };
28035 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
28036 index d4f7f99..94f603e 100644
28037 --- a/drivers/ata/libata-core.c
28038 +++ b/drivers/ata/libata-core.c
28039 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
28040 struct ata_port *ap;
28041 unsigned int tag;
28042
28043 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28044 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28045 ap = qc->ap;
28046
28047 qc->flags = 0;
28048 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
28049 struct ata_port *ap;
28050 struct ata_link *link;
28051
28052 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28053 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28054 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
28055 ap = qc->ap;
28056 link = qc->dev->link;
28057 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
28058 * LOCKING:
28059 * None.
28060 */
28061 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
28062 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28063 {
28064 static DEFINE_SPINLOCK(lock);
28065 const struct ata_port_operations *cur;
28066 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28067 return;
28068
28069 spin_lock(&lock);
28070 + pax_open_kernel();
28071
28072 for (cur = ops->inherits; cur; cur = cur->inherits) {
28073 void **inherit = (void **)cur;
28074 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28075 if (IS_ERR(*pp))
28076 *pp = NULL;
28077
28078 - ops->inherits = NULL;
28079 + *(struct ata_port_operations **)&ops->inherits = NULL;
28080
28081 + pax_close_kernel();
28082 spin_unlock(&lock);
28083 }
28084
28085 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28086 */
28087 /* KILLME - the only user left is ipr */
28088 void ata_host_init(struct ata_host *host, struct device *dev,
28089 - unsigned long flags, struct ata_port_operations *ops)
28090 + unsigned long flags, const struct ata_port_operations *ops)
28091 {
28092 spin_lock_init(&host->lock);
28093 host->dev = dev;
28094 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28095 /* truly dummy */
28096 }
28097
28098 -struct ata_port_operations ata_dummy_port_ops = {
28099 +const struct ata_port_operations ata_dummy_port_ops = {
28100 .qc_prep = ata_noop_qc_prep,
28101 .qc_issue = ata_dummy_qc_issue,
28102 .error_handler = ata_dummy_error_handler,
28103 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28104 index e5bdb9b..45a8e72 100644
28105 --- a/drivers/ata/libata-eh.c
28106 +++ b/drivers/ata/libata-eh.c
28107 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28108 {
28109 struct ata_link *link;
28110
28111 + pax_track_stack();
28112 +
28113 ata_for_each_link(link, ap, HOST_FIRST)
28114 ata_eh_link_report(link);
28115 }
28116 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28117 */
28118 void ata_std_error_handler(struct ata_port *ap)
28119 {
28120 - struct ata_port_operations *ops = ap->ops;
28121 + const struct ata_port_operations *ops = ap->ops;
28122 ata_reset_fn_t hardreset = ops->hardreset;
28123
28124 /* ignore built-in hardreset if SCR access is not available */
28125 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28126 index 51f0ffb..19ce3e3 100644
28127 --- a/drivers/ata/libata-pmp.c
28128 +++ b/drivers/ata/libata-pmp.c
28129 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28130 */
28131 static int sata_pmp_eh_recover(struct ata_port *ap)
28132 {
28133 - struct ata_port_operations *ops = ap->ops;
28134 + const struct ata_port_operations *ops = ap->ops;
28135 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28136 struct ata_link *pmp_link = &ap->link;
28137 struct ata_device *pmp_dev = pmp_link->device;
28138 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28139 index d8f35fe..288180a 100644
28140 --- a/drivers/ata/pata_acpi.c
28141 +++ b/drivers/ata/pata_acpi.c
28142 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28143 ATA_BMDMA_SHT(DRV_NAME),
28144 };
28145
28146 -static struct ata_port_operations pacpi_ops = {
28147 +static const struct ata_port_operations pacpi_ops = {
28148 .inherits = &ata_bmdma_port_ops,
28149 .qc_issue = pacpi_qc_issue,
28150 .cable_detect = pacpi_cable_detect,
28151 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28152 index 9434114..1f2f364 100644
28153 --- a/drivers/ata/pata_ali.c
28154 +++ b/drivers/ata/pata_ali.c
28155 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28156 * Port operations for PIO only ALi
28157 */
28158
28159 -static struct ata_port_operations ali_early_port_ops = {
28160 +static const struct ata_port_operations ali_early_port_ops = {
28161 .inherits = &ata_sff_port_ops,
28162 .cable_detect = ata_cable_40wire,
28163 .set_piomode = ali_set_piomode,
28164 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28165 * Port operations for DMA capable ALi without cable
28166 * detect
28167 */
28168 -static struct ata_port_operations ali_20_port_ops = {
28169 +static const struct ata_port_operations ali_20_port_ops = {
28170 .inherits = &ali_dma_base_ops,
28171 .cable_detect = ata_cable_40wire,
28172 .mode_filter = ali_20_filter,
28173 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28174 /*
28175 * Port operations for DMA capable ALi with cable detect
28176 */
28177 -static struct ata_port_operations ali_c2_port_ops = {
28178 +static const struct ata_port_operations ali_c2_port_ops = {
28179 .inherits = &ali_dma_base_ops,
28180 .check_atapi_dma = ali_check_atapi_dma,
28181 .cable_detect = ali_c2_cable_detect,
28182 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28183 /*
28184 * Port operations for DMA capable ALi with cable detect
28185 */
28186 -static struct ata_port_operations ali_c4_port_ops = {
28187 +static const struct ata_port_operations ali_c4_port_ops = {
28188 .inherits = &ali_dma_base_ops,
28189 .check_atapi_dma = ali_check_atapi_dma,
28190 .cable_detect = ali_c2_cable_detect,
28191 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28192 /*
28193 * Port operations for DMA capable ALi with cable detect and LBA48
28194 */
28195 -static struct ata_port_operations ali_c5_port_ops = {
28196 +static const struct ata_port_operations ali_c5_port_ops = {
28197 .inherits = &ali_dma_base_ops,
28198 .check_atapi_dma = ali_check_atapi_dma,
28199 .dev_config = ali_warn_atapi_dma,
28200 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28201 index 567f3f7..c8ee0da 100644
28202 --- a/drivers/ata/pata_amd.c
28203 +++ b/drivers/ata/pata_amd.c
28204 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28205 .prereset = amd_pre_reset,
28206 };
28207
28208 -static struct ata_port_operations amd33_port_ops = {
28209 +static const struct ata_port_operations amd33_port_ops = {
28210 .inherits = &amd_base_port_ops,
28211 .cable_detect = ata_cable_40wire,
28212 .set_piomode = amd33_set_piomode,
28213 .set_dmamode = amd33_set_dmamode,
28214 };
28215
28216 -static struct ata_port_operations amd66_port_ops = {
28217 +static const struct ata_port_operations amd66_port_ops = {
28218 .inherits = &amd_base_port_ops,
28219 .cable_detect = ata_cable_unknown,
28220 .set_piomode = amd66_set_piomode,
28221 .set_dmamode = amd66_set_dmamode,
28222 };
28223
28224 -static struct ata_port_operations amd100_port_ops = {
28225 +static const struct ata_port_operations amd100_port_ops = {
28226 .inherits = &amd_base_port_ops,
28227 .cable_detect = ata_cable_unknown,
28228 .set_piomode = amd100_set_piomode,
28229 .set_dmamode = amd100_set_dmamode,
28230 };
28231
28232 -static struct ata_port_operations amd133_port_ops = {
28233 +static const struct ata_port_operations amd133_port_ops = {
28234 .inherits = &amd_base_port_ops,
28235 .cable_detect = amd_cable_detect,
28236 .set_piomode = amd133_set_piomode,
28237 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28238 .host_stop = nv_host_stop,
28239 };
28240
28241 -static struct ata_port_operations nv100_port_ops = {
28242 +static const struct ata_port_operations nv100_port_ops = {
28243 .inherits = &nv_base_port_ops,
28244 .set_piomode = nv100_set_piomode,
28245 .set_dmamode = nv100_set_dmamode,
28246 };
28247
28248 -static struct ata_port_operations nv133_port_ops = {
28249 +static const struct ata_port_operations nv133_port_ops = {
28250 .inherits = &nv_base_port_ops,
28251 .set_piomode = nv133_set_piomode,
28252 .set_dmamode = nv133_set_dmamode,
28253 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28254 index d332cfd..4b7eaae 100644
28255 --- a/drivers/ata/pata_artop.c
28256 +++ b/drivers/ata/pata_artop.c
28257 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28258 ATA_BMDMA_SHT(DRV_NAME),
28259 };
28260
28261 -static struct ata_port_operations artop6210_ops = {
28262 +static const struct ata_port_operations artop6210_ops = {
28263 .inherits = &ata_bmdma_port_ops,
28264 .cable_detect = ata_cable_40wire,
28265 .set_piomode = artop6210_set_piomode,
28266 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28267 .qc_defer = artop6210_qc_defer,
28268 };
28269
28270 -static struct ata_port_operations artop6260_ops = {
28271 +static const struct ata_port_operations artop6260_ops = {
28272 .inherits = &ata_bmdma_port_ops,
28273 .cable_detect = artop6260_cable_detect,
28274 .set_piomode = artop6260_set_piomode,
28275 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28276 index 5c129f9..7bb7ccb 100644
28277 --- a/drivers/ata/pata_at32.c
28278 +++ b/drivers/ata/pata_at32.c
28279 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28280 ATA_PIO_SHT(DRV_NAME),
28281 };
28282
28283 -static struct ata_port_operations at32_port_ops = {
28284 +static const struct ata_port_operations at32_port_ops = {
28285 .inherits = &ata_sff_port_ops,
28286 .cable_detect = ata_cable_40wire,
28287 .set_piomode = pata_at32_set_piomode,
28288 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28289 index 41c94b1..829006d 100644
28290 --- a/drivers/ata/pata_at91.c
28291 +++ b/drivers/ata/pata_at91.c
28292 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28293 ATA_PIO_SHT(DRV_NAME),
28294 };
28295
28296 -static struct ata_port_operations pata_at91_port_ops = {
28297 +static const struct ata_port_operations pata_at91_port_ops = {
28298 .inherits = &ata_sff_port_ops,
28299
28300 .sff_data_xfer = pata_at91_data_xfer_noirq,
28301 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28302 index ae4454d..d391eb4 100644
28303 --- a/drivers/ata/pata_atiixp.c
28304 +++ b/drivers/ata/pata_atiixp.c
28305 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28306 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28307 };
28308
28309 -static struct ata_port_operations atiixp_port_ops = {
28310 +static const struct ata_port_operations atiixp_port_ops = {
28311 .inherits = &ata_bmdma_port_ops,
28312
28313 .qc_prep = ata_sff_dumb_qc_prep,
28314 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28315 index 6fe7ded..2a425dc 100644
28316 --- a/drivers/ata/pata_atp867x.c
28317 +++ b/drivers/ata/pata_atp867x.c
28318 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28319 ATA_BMDMA_SHT(DRV_NAME),
28320 };
28321
28322 -static struct ata_port_operations atp867x_ops = {
28323 +static const struct ata_port_operations atp867x_ops = {
28324 .inherits = &ata_bmdma_port_ops,
28325 .cable_detect = atp867x_cable_detect,
28326 .set_piomode = atp867x_set_piomode,
28327 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28328 index c4b47a3..b27a367 100644
28329 --- a/drivers/ata/pata_bf54x.c
28330 +++ b/drivers/ata/pata_bf54x.c
28331 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28332 .dma_boundary = ATA_DMA_BOUNDARY,
28333 };
28334
28335 -static struct ata_port_operations bfin_pata_ops = {
28336 +static const struct ata_port_operations bfin_pata_ops = {
28337 .inherits = &ata_sff_port_ops,
28338
28339 .set_piomode = bfin_set_piomode,
28340 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28341 index 5acf9fa..84248be 100644
28342 --- a/drivers/ata/pata_cmd640.c
28343 +++ b/drivers/ata/pata_cmd640.c
28344 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28345 ATA_BMDMA_SHT(DRV_NAME),
28346 };
28347
28348 -static struct ata_port_operations cmd640_port_ops = {
28349 +static const struct ata_port_operations cmd640_port_ops = {
28350 .inherits = &ata_bmdma_port_ops,
28351 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28352 .sff_data_xfer = ata_sff_data_xfer_noirq,
28353 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28354 index ccd2694..c869c3d 100644
28355 --- a/drivers/ata/pata_cmd64x.c
28356 +++ b/drivers/ata/pata_cmd64x.c
28357 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28358 .set_dmamode = cmd64x_set_dmamode,
28359 };
28360
28361 -static struct ata_port_operations cmd64x_port_ops = {
28362 +static const struct ata_port_operations cmd64x_port_ops = {
28363 .inherits = &cmd64x_base_ops,
28364 .cable_detect = ata_cable_40wire,
28365 };
28366
28367 -static struct ata_port_operations cmd646r1_port_ops = {
28368 +static const struct ata_port_operations cmd646r1_port_ops = {
28369 .inherits = &cmd64x_base_ops,
28370 .bmdma_stop = cmd646r1_bmdma_stop,
28371 .cable_detect = ata_cable_40wire,
28372 };
28373
28374 -static struct ata_port_operations cmd648_port_ops = {
28375 +static const struct ata_port_operations cmd648_port_ops = {
28376 .inherits = &cmd64x_base_ops,
28377 .bmdma_stop = cmd648_bmdma_stop,
28378 .cable_detect = cmd648_cable_detect,
28379 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28380 index 0df83cf..d7595b0 100644
28381 --- a/drivers/ata/pata_cs5520.c
28382 +++ b/drivers/ata/pata_cs5520.c
28383 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28384 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28385 };
28386
28387 -static struct ata_port_operations cs5520_port_ops = {
28388 +static const struct ata_port_operations cs5520_port_ops = {
28389 .inherits = &ata_bmdma_port_ops,
28390 .qc_prep = ata_sff_dumb_qc_prep,
28391 .cable_detect = ata_cable_40wire,
28392 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28393 index c974b05..6d26b11 100644
28394 --- a/drivers/ata/pata_cs5530.c
28395 +++ b/drivers/ata/pata_cs5530.c
28396 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28397 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28398 };
28399
28400 -static struct ata_port_operations cs5530_port_ops = {
28401 +static const struct ata_port_operations cs5530_port_ops = {
28402 .inherits = &ata_bmdma_port_ops,
28403
28404 .qc_prep = ata_sff_dumb_qc_prep,
28405 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28406 index 403f561..aacd26b 100644
28407 --- a/drivers/ata/pata_cs5535.c
28408 +++ b/drivers/ata/pata_cs5535.c
28409 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28410 ATA_BMDMA_SHT(DRV_NAME),
28411 };
28412
28413 -static struct ata_port_operations cs5535_port_ops = {
28414 +static const struct ata_port_operations cs5535_port_ops = {
28415 .inherits = &ata_bmdma_port_ops,
28416 .cable_detect = cs5535_cable_detect,
28417 .set_piomode = cs5535_set_piomode,
28418 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28419 index 6da4cb4..de24a25 100644
28420 --- a/drivers/ata/pata_cs5536.c
28421 +++ b/drivers/ata/pata_cs5536.c
28422 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28423 ATA_BMDMA_SHT(DRV_NAME),
28424 };
28425
28426 -static struct ata_port_operations cs5536_port_ops = {
28427 +static const struct ata_port_operations cs5536_port_ops = {
28428 .inherits = &ata_bmdma_port_ops,
28429 .cable_detect = cs5536_cable_detect,
28430 .set_piomode = cs5536_set_piomode,
28431 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28432 index 8fb040b..b16a9c9 100644
28433 --- a/drivers/ata/pata_cypress.c
28434 +++ b/drivers/ata/pata_cypress.c
28435 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28436 ATA_BMDMA_SHT(DRV_NAME),
28437 };
28438
28439 -static struct ata_port_operations cy82c693_port_ops = {
28440 +static const struct ata_port_operations cy82c693_port_ops = {
28441 .inherits = &ata_bmdma_port_ops,
28442 .cable_detect = ata_cable_40wire,
28443 .set_piomode = cy82c693_set_piomode,
28444 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28445 index 2a6412f..555ee11 100644
28446 --- a/drivers/ata/pata_efar.c
28447 +++ b/drivers/ata/pata_efar.c
28448 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28449 ATA_BMDMA_SHT(DRV_NAME),
28450 };
28451
28452 -static struct ata_port_operations efar_ops = {
28453 +static const struct ata_port_operations efar_ops = {
28454 .inherits = &ata_bmdma_port_ops,
28455 .cable_detect = efar_cable_detect,
28456 .set_piomode = efar_set_piomode,
28457 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28458 index b9d8836..0b92030 100644
28459 --- a/drivers/ata/pata_hpt366.c
28460 +++ b/drivers/ata/pata_hpt366.c
28461 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28462 * Configuration for HPT366/68
28463 */
28464
28465 -static struct ata_port_operations hpt366_port_ops = {
28466 +static const struct ata_port_operations hpt366_port_ops = {
28467 .inherits = &ata_bmdma_port_ops,
28468 .cable_detect = hpt36x_cable_detect,
28469 .mode_filter = hpt366_filter,
28470 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28471 index 5af7f19..00c4980 100644
28472 --- a/drivers/ata/pata_hpt37x.c
28473 +++ b/drivers/ata/pata_hpt37x.c
28474 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28475 * Configuration for HPT370
28476 */
28477
28478 -static struct ata_port_operations hpt370_port_ops = {
28479 +static const struct ata_port_operations hpt370_port_ops = {
28480 .inherits = &ata_bmdma_port_ops,
28481
28482 .bmdma_stop = hpt370_bmdma_stop,
28483 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28484 * Configuration for HPT370A. Close to 370 but less filters
28485 */
28486
28487 -static struct ata_port_operations hpt370a_port_ops = {
28488 +static const struct ata_port_operations hpt370a_port_ops = {
28489 .inherits = &hpt370_port_ops,
28490 .mode_filter = hpt370a_filter,
28491 };
28492 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28493 * and DMA mode setting functionality.
28494 */
28495
28496 -static struct ata_port_operations hpt372_port_ops = {
28497 +static const struct ata_port_operations hpt372_port_ops = {
28498 .inherits = &ata_bmdma_port_ops,
28499
28500 .bmdma_stop = hpt37x_bmdma_stop,
28501 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28502 * but we have a different cable detection procedure for function 1.
28503 */
28504
28505 -static struct ata_port_operations hpt374_fn1_port_ops = {
28506 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28507 .inherits = &hpt372_port_ops,
28508 .prereset = hpt374_fn1_pre_reset,
28509 };
28510 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28511 index 100f227..2e39382 100644
28512 --- a/drivers/ata/pata_hpt3x2n.c
28513 +++ b/drivers/ata/pata_hpt3x2n.c
28514 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28515 * Configuration for HPT3x2n.
28516 */
28517
28518 -static struct ata_port_operations hpt3x2n_port_ops = {
28519 +static const struct ata_port_operations hpt3x2n_port_ops = {
28520 .inherits = &ata_bmdma_port_ops,
28521
28522 .bmdma_stop = hpt3x2n_bmdma_stop,
28523 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28524 index 7e31025..6fca8f4 100644
28525 --- a/drivers/ata/pata_hpt3x3.c
28526 +++ b/drivers/ata/pata_hpt3x3.c
28527 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28528 ATA_BMDMA_SHT(DRV_NAME),
28529 };
28530
28531 -static struct ata_port_operations hpt3x3_port_ops = {
28532 +static const struct ata_port_operations hpt3x3_port_ops = {
28533 .inherits = &ata_bmdma_port_ops,
28534 .cable_detect = ata_cable_40wire,
28535 .set_piomode = hpt3x3_set_piomode,
28536 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28537 index b663b7f..9a26c2a 100644
28538 --- a/drivers/ata/pata_icside.c
28539 +++ b/drivers/ata/pata_icside.c
28540 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28541 }
28542 }
28543
28544 -static struct ata_port_operations pata_icside_port_ops = {
28545 +static const struct ata_port_operations pata_icside_port_ops = {
28546 .inherits = &ata_sff_port_ops,
28547 /* no need to build any PRD tables for DMA */
28548 .qc_prep = ata_noop_qc_prep,
28549 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28550 index 4bceb88..457dfb6 100644
28551 --- a/drivers/ata/pata_isapnp.c
28552 +++ b/drivers/ata/pata_isapnp.c
28553 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28554 ATA_PIO_SHT(DRV_NAME),
28555 };
28556
28557 -static struct ata_port_operations isapnp_port_ops = {
28558 +static const struct ata_port_operations isapnp_port_ops = {
28559 .inherits = &ata_sff_port_ops,
28560 .cable_detect = ata_cable_40wire,
28561 };
28562
28563 -static struct ata_port_operations isapnp_noalt_port_ops = {
28564 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28565 .inherits = &ata_sff_port_ops,
28566 .cable_detect = ata_cable_40wire,
28567 /* No altstatus so we don't want to use the lost interrupt poll */
28568 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28569 index f156da8..24976e2 100644
28570 --- a/drivers/ata/pata_it8213.c
28571 +++ b/drivers/ata/pata_it8213.c
28572 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28573 };
28574
28575
28576 -static struct ata_port_operations it8213_ops = {
28577 +static const struct ata_port_operations it8213_ops = {
28578 .inherits = &ata_bmdma_port_ops,
28579 .cable_detect = it8213_cable_detect,
28580 .set_piomode = it8213_set_piomode,
28581 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28582 index 188bc2f..ca9e785 100644
28583 --- a/drivers/ata/pata_it821x.c
28584 +++ b/drivers/ata/pata_it821x.c
28585 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28586 ATA_BMDMA_SHT(DRV_NAME),
28587 };
28588
28589 -static struct ata_port_operations it821x_smart_port_ops = {
28590 +static const struct ata_port_operations it821x_smart_port_ops = {
28591 .inherits = &ata_bmdma_port_ops,
28592
28593 .check_atapi_dma= it821x_check_atapi_dma,
28594 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28595 .port_start = it821x_port_start,
28596 };
28597
28598 -static struct ata_port_operations it821x_passthru_port_ops = {
28599 +static const struct ata_port_operations it821x_passthru_port_ops = {
28600 .inherits = &ata_bmdma_port_ops,
28601
28602 .check_atapi_dma= it821x_check_atapi_dma,
28603 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28604 .port_start = it821x_port_start,
28605 };
28606
28607 -static struct ata_port_operations it821x_rdc_port_ops = {
28608 +static const struct ata_port_operations it821x_rdc_port_ops = {
28609 .inherits = &ata_bmdma_port_ops,
28610
28611 .check_atapi_dma= it821x_check_atapi_dma,
28612 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28613 index ba54b08..4b952b7 100644
28614 --- a/drivers/ata/pata_ixp4xx_cf.c
28615 +++ b/drivers/ata/pata_ixp4xx_cf.c
28616 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28617 ATA_PIO_SHT(DRV_NAME),
28618 };
28619
28620 -static struct ata_port_operations ixp4xx_port_ops = {
28621 +static const struct ata_port_operations ixp4xx_port_ops = {
28622 .inherits = &ata_sff_port_ops,
28623 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28624 .cable_detect = ata_cable_40wire,
28625 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28626 index 3a1474a..434b0ff 100644
28627 --- a/drivers/ata/pata_jmicron.c
28628 +++ b/drivers/ata/pata_jmicron.c
28629 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28630 ATA_BMDMA_SHT(DRV_NAME),
28631 };
28632
28633 -static struct ata_port_operations jmicron_ops = {
28634 +static const struct ata_port_operations jmicron_ops = {
28635 .inherits = &ata_bmdma_port_ops,
28636 .prereset = jmicron_pre_reset,
28637 };
28638 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28639 index 6932e56..220e71d 100644
28640 --- a/drivers/ata/pata_legacy.c
28641 +++ b/drivers/ata/pata_legacy.c
28642 @@ -106,7 +106,7 @@ struct legacy_probe {
28643
28644 struct legacy_controller {
28645 const char *name;
28646 - struct ata_port_operations *ops;
28647 + const struct ata_port_operations *ops;
28648 unsigned int pio_mask;
28649 unsigned int flags;
28650 unsigned int pflags;
28651 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28652 * pio_mask as well.
28653 */
28654
28655 -static struct ata_port_operations simple_port_ops = {
28656 +static const struct ata_port_operations simple_port_ops = {
28657 .inherits = &legacy_base_port_ops,
28658 .sff_data_xfer = ata_sff_data_xfer_noirq,
28659 };
28660
28661 -static struct ata_port_operations legacy_port_ops = {
28662 +static const struct ata_port_operations legacy_port_ops = {
28663 .inherits = &legacy_base_port_ops,
28664 .sff_data_xfer = ata_sff_data_xfer_noirq,
28665 .set_mode = legacy_set_mode,
28666 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28667 return buflen;
28668 }
28669
28670 -static struct ata_port_operations pdc20230_port_ops = {
28671 +static const struct ata_port_operations pdc20230_port_ops = {
28672 .inherits = &legacy_base_port_ops,
28673 .set_piomode = pdc20230_set_piomode,
28674 .sff_data_xfer = pdc_data_xfer_vlb,
28675 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28676 ioread8(ap->ioaddr.status_addr);
28677 }
28678
28679 -static struct ata_port_operations ht6560a_port_ops = {
28680 +static const struct ata_port_operations ht6560a_port_ops = {
28681 .inherits = &legacy_base_port_ops,
28682 .set_piomode = ht6560a_set_piomode,
28683 };
28684 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28685 ioread8(ap->ioaddr.status_addr);
28686 }
28687
28688 -static struct ata_port_operations ht6560b_port_ops = {
28689 +static const struct ata_port_operations ht6560b_port_ops = {
28690 .inherits = &legacy_base_port_ops,
28691 .set_piomode = ht6560b_set_piomode,
28692 };
28693 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28694 }
28695
28696
28697 -static struct ata_port_operations opti82c611a_port_ops = {
28698 +static const struct ata_port_operations opti82c611a_port_ops = {
28699 .inherits = &legacy_base_port_ops,
28700 .set_piomode = opti82c611a_set_piomode,
28701 };
28702 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28703 return ata_sff_qc_issue(qc);
28704 }
28705
28706 -static struct ata_port_operations opti82c46x_port_ops = {
28707 +static const struct ata_port_operations opti82c46x_port_ops = {
28708 .inherits = &legacy_base_port_ops,
28709 .set_piomode = opti82c46x_set_piomode,
28710 .qc_issue = opti82c46x_qc_issue,
28711 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28712 return 0;
28713 }
28714
28715 -static struct ata_port_operations qdi6500_port_ops = {
28716 +static const struct ata_port_operations qdi6500_port_ops = {
28717 .inherits = &legacy_base_port_ops,
28718 .set_piomode = qdi6500_set_piomode,
28719 .qc_issue = qdi_qc_issue,
28720 .sff_data_xfer = vlb32_data_xfer,
28721 };
28722
28723 -static struct ata_port_operations qdi6580_port_ops = {
28724 +static const struct ata_port_operations qdi6580_port_ops = {
28725 .inherits = &legacy_base_port_ops,
28726 .set_piomode = qdi6580_set_piomode,
28727 .sff_data_xfer = vlb32_data_xfer,
28728 };
28729
28730 -static struct ata_port_operations qdi6580dp_port_ops = {
28731 +static const struct ata_port_operations qdi6580dp_port_ops = {
28732 .inherits = &legacy_base_port_ops,
28733 .set_piomode = qdi6580dp_set_piomode,
28734 .sff_data_xfer = vlb32_data_xfer,
28735 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28736 return 0;
28737 }
28738
28739 -static struct ata_port_operations winbond_port_ops = {
28740 +static const struct ata_port_operations winbond_port_ops = {
28741 .inherits = &legacy_base_port_ops,
28742 .set_piomode = winbond_set_piomode,
28743 .sff_data_xfer = vlb32_data_xfer,
28744 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28745 int pio_modes = controller->pio_mask;
28746 unsigned long io = probe->port;
28747 u32 mask = (1 << probe->slot);
28748 - struct ata_port_operations *ops = controller->ops;
28749 + const struct ata_port_operations *ops = controller->ops;
28750 struct legacy_data *ld = &legacy_data[probe->slot];
28751 struct ata_host *host = NULL;
28752 struct ata_port *ap;
28753 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28754 index 2096fb7..4d090fc 100644
28755 --- a/drivers/ata/pata_marvell.c
28756 +++ b/drivers/ata/pata_marvell.c
28757 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28758 ATA_BMDMA_SHT(DRV_NAME),
28759 };
28760
28761 -static struct ata_port_operations marvell_ops = {
28762 +static const struct ata_port_operations marvell_ops = {
28763 .inherits = &ata_bmdma_port_ops,
28764 .cable_detect = marvell_cable_detect,
28765 .prereset = marvell_pre_reset,
28766 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28767 index 99d41be..7d56aa8 100644
28768 --- a/drivers/ata/pata_mpc52xx.c
28769 +++ b/drivers/ata/pata_mpc52xx.c
28770 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28771 ATA_PIO_SHT(DRV_NAME),
28772 };
28773
28774 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28775 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28776 .inherits = &ata_bmdma_port_ops,
28777 .sff_dev_select = mpc52xx_ata_dev_select,
28778 .set_piomode = mpc52xx_ata_set_piomode,
28779 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28780 index b21f002..0a27e7f 100644
28781 --- a/drivers/ata/pata_mpiix.c
28782 +++ b/drivers/ata/pata_mpiix.c
28783 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28784 ATA_PIO_SHT(DRV_NAME),
28785 };
28786
28787 -static struct ata_port_operations mpiix_port_ops = {
28788 +static const struct ata_port_operations mpiix_port_ops = {
28789 .inherits = &ata_sff_port_ops,
28790 .qc_issue = mpiix_qc_issue,
28791 .cable_detect = ata_cable_40wire,
28792 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28793 index f0d52f7..89c3be3 100644
28794 --- a/drivers/ata/pata_netcell.c
28795 +++ b/drivers/ata/pata_netcell.c
28796 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28797 ATA_BMDMA_SHT(DRV_NAME),
28798 };
28799
28800 -static struct ata_port_operations netcell_ops = {
28801 +static const struct ata_port_operations netcell_ops = {
28802 .inherits = &ata_bmdma_port_ops,
28803 .cable_detect = ata_cable_80wire,
28804 .read_id = netcell_read_id,
28805 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28806 index dd53a66..a3f4317 100644
28807 --- a/drivers/ata/pata_ninja32.c
28808 +++ b/drivers/ata/pata_ninja32.c
28809 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28810 ATA_BMDMA_SHT(DRV_NAME),
28811 };
28812
28813 -static struct ata_port_operations ninja32_port_ops = {
28814 +static const struct ata_port_operations ninja32_port_ops = {
28815 .inherits = &ata_bmdma_port_ops,
28816 .sff_dev_select = ninja32_dev_select,
28817 .cable_detect = ata_cable_40wire,
28818 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28819 index ca53fac..9aa93ef 100644
28820 --- a/drivers/ata/pata_ns87410.c
28821 +++ b/drivers/ata/pata_ns87410.c
28822 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28823 ATA_PIO_SHT(DRV_NAME),
28824 };
28825
28826 -static struct ata_port_operations ns87410_port_ops = {
28827 +static const struct ata_port_operations ns87410_port_ops = {
28828 .inherits = &ata_sff_port_ops,
28829 .qc_issue = ns87410_qc_issue,
28830 .cable_detect = ata_cable_40wire,
28831 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28832 index 773b159..55f454e 100644
28833 --- a/drivers/ata/pata_ns87415.c
28834 +++ b/drivers/ata/pata_ns87415.c
28835 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28836 }
28837 #endif /* 87560 SuperIO Support */
28838
28839 -static struct ata_port_operations ns87415_pata_ops = {
28840 +static const struct ata_port_operations ns87415_pata_ops = {
28841 .inherits = &ata_bmdma_port_ops,
28842
28843 .check_atapi_dma = ns87415_check_atapi_dma,
28844 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28845 };
28846
28847 #if defined(CONFIG_SUPERIO)
28848 -static struct ata_port_operations ns87560_pata_ops = {
28849 +static const struct ata_port_operations ns87560_pata_ops = {
28850 .inherits = &ns87415_pata_ops,
28851 .sff_tf_read = ns87560_tf_read,
28852 .sff_check_status = ns87560_check_status,
28853 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28854 index d6f6956..639295b 100644
28855 --- a/drivers/ata/pata_octeon_cf.c
28856 +++ b/drivers/ata/pata_octeon_cf.c
28857 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28858 return 0;
28859 }
28860
28861 +/* cannot be const */
28862 static struct ata_port_operations octeon_cf_ops = {
28863 .inherits = &ata_sff_port_ops,
28864 .check_atapi_dma = octeon_cf_check_atapi_dma,
28865 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28866 index 84ac503..adee1cd 100644
28867 --- a/drivers/ata/pata_oldpiix.c
28868 +++ b/drivers/ata/pata_oldpiix.c
28869 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28870 ATA_BMDMA_SHT(DRV_NAME),
28871 };
28872
28873 -static struct ata_port_operations oldpiix_pata_ops = {
28874 +static const struct ata_port_operations oldpiix_pata_ops = {
28875 .inherits = &ata_bmdma_port_ops,
28876 .qc_issue = oldpiix_qc_issue,
28877 .cable_detect = ata_cable_40wire,
28878 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28879 index 99eddda..3a4c0aa 100644
28880 --- a/drivers/ata/pata_opti.c
28881 +++ b/drivers/ata/pata_opti.c
28882 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28883 ATA_PIO_SHT(DRV_NAME),
28884 };
28885
28886 -static struct ata_port_operations opti_port_ops = {
28887 +static const struct ata_port_operations opti_port_ops = {
28888 .inherits = &ata_sff_port_ops,
28889 .cable_detect = ata_cable_40wire,
28890 .set_piomode = opti_set_piomode,
28891 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28892 index 86885a4..8e9968d 100644
28893 --- a/drivers/ata/pata_optidma.c
28894 +++ b/drivers/ata/pata_optidma.c
28895 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28896 ATA_BMDMA_SHT(DRV_NAME),
28897 };
28898
28899 -static struct ata_port_operations optidma_port_ops = {
28900 +static const struct ata_port_operations optidma_port_ops = {
28901 .inherits = &ata_bmdma_port_ops,
28902 .cable_detect = ata_cable_40wire,
28903 .set_piomode = optidma_set_pio_mode,
28904 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28905 .prereset = optidma_pre_reset,
28906 };
28907
28908 -static struct ata_port_operations optiplus_port_ops = {
28909 +static const struct ata_port_operations optiplus_port_ops = {
28910 .inherits = &optidma_port_ops,
28911 .set_piomode = optiplus_set_pio_mode,
28912 .set_dmamode = optiplus_set_dma_mode,
28913 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28914 index 11fb4cc..1a14022 100644
28915 --- a/drivers/ata/pata_palmld.c
28916 +++ b/drivers/ata/pata_palmld.c
28917 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28918 ATA_PIO_SHT(DRV_NAME),
28919 };
28920
28921 -static struct ata_port_operations palmld_port_ops = {
28922 +static const struct ata_port_operations palmld_port_ops = {
28923 .inherits = &ata_sff_port_ops,
28924 .sff_data_xfer = ata_sff_data_xfer_noirq,
28925 .cable_detect = ata_cable_40wire,
28926 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28927 index dc99e26..7f4b1e4 100644
28928 --- a/drivers/ata/pata_pcmcia.c
28929 +++ b/drivers/ata/pata_pcmcia.c
28930 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28931 ATA_PIO_SHT(DRV_NAME),
28932 };
28933
28934 -static struct ata_port_operations pcmcia_port_ops = {
28935 +static const struct ata_port_operations pcmcia_port_ops = {
28936 .inherits = &ata_sff_port_ops,
28937 .sff_data_xfer = ata_sff_data_xfer_noirq,
28938 .cable_detect = ata_cable_40wire,
28939 .set_mode = pcmcia_set_mode,
28940 };
28941
28942 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28943 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28944 .inherits = &ata_sff_port_ops,
28945 .sff_data_xfer = ata_data_xfer_8bit,
28946 .cable_detect = ata_cable_40wire,
28947 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28948 unsigned long io_base, ctl_base;
28949 void __iomem *io_addr, *ctl_addr;
28950 int n_ports = 1;
28951 - struct ata_port_operations *ops = &pcmcia_port_ops;
28952 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28953
28954 info = kzalloc(sizeof(*info), GFP_KERNEL);
28955 if (info == NULL)
28956 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28957 index ca5cad0..3a1f125 100644
28958 --- a/drivers/ata/pata_pdc2027x.c
28959 +++ b/drivers/ata/pata_pdc2027x.c
28960 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28961 ATA_BMDMA_SHT(DRV_NAME),
28962 };
28963
28964 -static struct ata_port_operations pdc2027x_pata100_ops = {
28965 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28966 .inherits = &ata_bmdma_port_ops,
28967 .check_atapi_dma = pdc2027x_check_atapi_dma,
28968 .cable_detect = pdc2027x_cable_detect,
28969 .prereset = pdc2027x_prereset,
28970 };
28971
28972 -static struct ata_port_operations pdc2027x_pata133_ops = {
28973 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28974 .inherits = &pdc2027x_pata100_ops,
28975 .mode_filter = pdc2027x_mode_filter,
28976 .set_piomode = pdc2027x_set_piomode,
28977 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28978 index 2911120..4bf62aa 100644
28979 --- a/drivers/ata/pata_pdc202xx_old.c
28980 +++ b/drivers/ata/pata_pdc202xx_old.c
28981 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28982 ATA_BMDMA_SHT(DRV_NAME),
28983 };
28984
28985 -static struct ata_port_operations pdc2024x_port_ops = {
28986 +static const struct ata_port_operations pdc2024x_port_ops = {
28987 .inherits = &ata_bmdma_port_ops,
28988
28989 .cable_detect = ata_cable_40wire,
28990 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28991 .sff_exec_command = pdc202xx_exec_command,
28992 };
28993
28994 -static struct ata_port_operations pdc2026x_port_ops = {
28995 +static const struct ata_port_operations pdc2026x_port_ops = {
28996 .inherits = &pdc2024x_port_ops,
28997
28998 .check_atapi_dma = pdc2026x_check_atapi_dma,
28999 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
29000 index 3f6ebc6..a18c358 100644
29001 --- a/drivers/ata/pata_platform.c
29002 +++ b/drivers/ata/pata_platform.c
29003 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
29004 ATA_PIO_SHT(DRV_NAME),
29005 };
29006
29007 -static struct ata_port_operations pata_platform_port_ops = {
29008 +static const struct ata_port_operations pata_platform_port_ops = {
29009 .inherits = &ata_sff_port_ops,
29010 .sff_data_xfer = ata_sff_data_xfer_noirq,
29011 .cable_detect = ata_cable_unknown,
29012 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
29013 index 45879dc..165a9f9 100644
29014 --- a/drivers/ata/pata_qdi.c
29015 +++ b/drivers/ata/pata_qdi.c
29016 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
29017 ATA_PIO_SHT(DRV_NAME),
29018 };
29019
29020 -static struct ata_port_operations qdi6500_port_ops = {
29021 +static const struct ata_port_operations qdi6500_port_ops = {
29022 .inherits = &ata_sff_port_ops,
29023 .qc_issue = qdi_qc_issue,
29024 .sff_data_xfer = qdi_data_xfer,
29025 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
29026 .set_piomode = qdi6500_set_piomode,
29027 };
29028
29029 -static struct ata_port_operations qdi6580_port_ops = {
29030 +static const struct ata_port_operations qdi6580_port_ops = {
29031 .inherits = &qdi6500_port_ops,
29032 .set_piomode = qdi6580_set_piomode,
29033 };
29034 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
29035 index 4401b33..716c5cc 100644
29036 --- a/drivers/ata/pata_radisys.c
29037 +++ b/drivers/ata/pata_radisys.c
29038 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
29039 ATA_BMDMA_SHT(DRV_NAME),
29040 };
29041
29042 -static struct ata_port_operations radisys_pata_ops = {
29043 +static const struct ata_port_operations radisys_pata_ops = {
29044 .inherits = &ata_bmdma_port_ops,
29045 .qc_issue = radisys_qc_issue,
29046 .cable_detect = ata_cable_unknown,
29047 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
29048 index 45f1e10..fab6bca 100644
29049 --- a/drivers/ata/pata_rb532_cf.c
29050 +++ b/drivers/ata/pata_rb532_cf.c
29051 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
29052 return IRQ_HANDLED;
29053 }
29054
29055 -static struct ata_port_operations rb532_pata_port_ops = {
29056 +static const struct ata_port_operations rb532_pata_port_ops = {
29057 .inherits = &ata_sff_port_ops,
29058 .sff_data_xfer = ata_sff_data_xfer32,
29059 };
29060 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29061 index c843a1e..b5853c3 100644
29062 --- a/drivers/ata/pata_rdc.c
29063 +++ b/drivers/ata/pata_rdc.c
29064 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29065 pci_write_config_byte(dev, 0x48, udma_enable);
29066 }
29067
29068 -static struct ata_port_operations rdc_pata_ops = {
29069 +static const struct ata_port_operations rdc_pata_ops = {
29070 .inherits = &ata_bmdma32_port_ops,
29071 .cable_detect = rdc_pata_cable_detect,
29072 .set_piomode = rdc_set_piomode,
29073 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29074 index a5e4dfe..080c8c9 100644
29075 --- a/drivers/ata/pata_rz1000.c
29076 +++ b/drivers/ata/pata_rz1000.c
29077 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29078 ATA_PIO_SHT(DRV_NAME),
29079 };
29080
29081 -static struct ata_port_operations rz1000_port_ops = {
29082 +static const struct ata_port_operations rz1000_port_ops = {
29083 .inherits = &ata_sff_port_ops,
29084 .cable_detect = ata_cable_40wire,
29085 .set_mode = rz1000_set_mode,
29086 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29087 index 3bbed83..e309daf 100644
29088 --- a/drivers/ata/pata_sc1200.c
29089 +++ b/drivers/ata/pata_sc1200.c
29090 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29091 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29092 };
29093
29094 -static struct ata_port_operations sc1200_port_ops = {
29095 +static const struct ata_port_operations sc1200_port_ops = {
29096 .inherits = &ata_bmdma_port_ops,
29097 .qc_prep = ata_sff_dumb_qc_prep,
29098 .qc_issue = sc1200_qc_issue,
29099 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29100 index 4257d6b..4c1d9d5 100644
29101 --- a/drivers/ata/pata_scc.c
29102 +++ b/drivers/ata/pata_scc.c
29103 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29104 ATA_BMDMA_SHT(DRV_NAME),
29105 };
29106
29107 -static struct ata_port_operations scc_pata_ops = {
29108 +static const struct ata_port_operations scc_pata_ops = {
29109 .inherits = &ata_bmdma_port_ops,
29110
29111 .set_piomode = scc_set_piomode,
29112 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29113 index 99cceb4..e2e0a87 100644
29114 --- a/drivers/ata/pata_sch.c
29115 +++ b/drivers/ata/pata_sch.c
29116 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29117 ATA_BMDMA_SHT(DRV_NAME),
29118 };
29119
29120 -static struct ata_port_operations sch_pata_ops = {
29121 +static const struct ata_port_operations sch_pata_ops = {
29122 .inherits = &ata_bmdma_port_ops,
29123 .cable_detect = ata_cable_unknown,
29124 .set_piomode = sch_set_piomode,
29125 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29126 index beaed12..39969f1 100644
29127 --- a/drivers/ata/pata_serverworks.c
29128 +++ b/drivers/ata/pata_serverworks.c
29129 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29130 ATA_BMDMA_SHT(DRV_NAME),
29131 };
29132
29133 -static struct ata_port_operations serverworks_osb4_port_ops = {
29134 +static const struct ata_port_operations serverworks_osb4_port_ops = {
29135 .inherits = &ata_bmdma_port_ops,
29136 .cable_detect = serverworks_cable_detect,
29137 .mode_filter = serverworks_osb4_filter,
29138 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29139 .set_dmamode = serverworks_set_dmamode,
29140 };
29141
29142 -static struct ata_port_operations serverworks_csb_port_ops = {
29143 +static const struct ata_port_operations serverworks_csb_port_ops = {
29144 .inherits = &serverworks_osb4_port_ops,
29145 .mode_filter = serverworks_csb_filter,
29146 };
29147 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29148 index a2ace48..0463b44 100644
29149 --- a/drivers/ata/pata_sil680.c
29150 +++ b/drivers/ata/pata_sil680.c
29151 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29152 ATA_BMDMA_SHT(DRV_NAME),
29153 };
29154
29155 -static struct ata_port_operations sil680_port_ops = {
29156 +static const struct ata_port_operations sil680_port_ops = {
29157 .inherits = &ata_bmdma32_port_ops,
29158 .cable_detect = sil680_cable_detect,
29159 .set_piomode = sil680_set_piomode,
29160 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29161 index 488e77b..b3724d5 100644
29162 --- a/drivers/ata/pata_sis.c
29163 +++ b/drivers/ata/pata_sis.c
29164 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29165 ATA_BMDMA_SHT(DRV_NAME),
29166 };
29167
29168 -static struct ata_port_operations sis_133_for_sata_ops = {
29169 +static const struct ata_port_operations sis_133_for_sata_ops = {
29170 .inherits = &ata_bmdma_port_ops,
29171 .set_piomode = sis_133_set_piomode,
29172 .set_dmamode = sis_133_set_dmamode,
29173 .cable_detect = sis_133_cable_detect,
29174 };
29175
29176 -static struct ata_port_operations sis_base_ops = {
29177 +static const struct ata_port_operations sis_base_ops = {
29178 .inherits = &ata_bmdma_port_ops,
29179 .prereset = sis_pre_reset,
29180 };
29181
29182 -static struct ata_port_operations sis_133_ops = {
29183 +static const struct ata_port_operations sis_133_ops = {
29184 .inherits = &sis_base_ops,
29185 .set_piomode = sis_133_set_piomode,
29186 .set_dmamode = sis_133_set_dmamode,
29187 .cable_detect = sis_133_cable_detect,
29188 };
29189
29190 -static struct ata_port_operations sis_133_early_ops = {
29191 +static const struct ata_port_operations sis_133_early_ops = {
29192 .inherits = &sis_base_ops,
29193 .set_piomode = sis_100_set_piomode,
29194 .set_dmamode = sis_133_early_set_dmamode,
29195 .cable_detect = sis_66_cable_detect,
29196 };
29197
29198 -static struct ata_port_operations sis_100_ops = {
29199 +static const struct ata_port_operations sis_100_ops = {
29200 .inherits = &sis_base_ops,
29201 .set_piomode = sis_100_set_piomode,
29202 .set_dmamode = sis_100_set_dmamode,
29203 .cable_detect = sis_66_cable_detect,
29204 };
29205
29206 -static struct ata_port_operations sis_66_ops = {
29207 +static const struct ata_port_operations sis_66_ops = {
29208 .inherits = &sis_base_ops,
29209 .set_piomode = sis_old_set_piomode,
29210 .set_dmamode = sis_66_set_dmamode,
29211 .cable_detect = sis_66_cable_detect,
29212 };
29213
29214 -static struct ata_port_operations sis_old_ops = {
29215 +static const struct ata_port_operations sis_old_ops = {
29216 .inherits = &sis_base_ops,
29217 .set_piomode = sis_old_set_piomode,
29218 .set_dmamode = sis_old_set_dmamode,
29219 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29220 index 29f733c..43e9ca0 100644
29221 --- a/drivers/ata/pata_sl82c105.c
29222 +++ b/drivers/ata/pata_sl82c105.c
29223 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29224 ATA_BMDMA_SHT(DRV_NAME),
29225 };
29226
29227 -static struct ata_port_operations sl82c105_port_ops = {
29228 +static const struct ata_port_operations sl82c105_port_ops = {
29229 .inherits = &ata_bmdma_port_ops,
29230 .qc_defer = sl82c105_qc_defer,
29231 .bmdma_start = sl82c105_bmdma_start,
29232 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29233 index f1f13ff..df39e99 100644
29234 --- a/drivers/ata/pata_triflex.c
29235 +++ b/drivers/ata/pata_triflex.c
29236 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29237 ATA_BMDMA_SHT(DRV_NAME),
29238 };
29239
29240 -static struct ata_port_operations triflex_port_ops = {
29241 +static const struct ata_port_operations triflex_port_ops = {
29242 .inherits = &ata_bmdma_port_ops,
29243 .bmdma_start = triflex_bmdma_start,
29244 .bmdma_stop = triflex_bmdma_stop,
29245 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29246 index 1d73b8d..98a4b29 100644
29247 --- a/drivers/ata/pata_via.c
29248 +++ b/drivers/ata/pata_via.c
29249 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29250 ATA_BMDMA_SHT(DRV_NAME),
29251 };
29252
29253 -static struct ata_port_operations via_port_ops = {
29254 +static const struct ata_port_operations via_port_ops = {
29255 .inherits = &ata_bmdma_port_ops,
29256 .cable_detect = via_cable_detect,
29257 .set_piomode = via_set_piomode,
29258 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29259 .port_start = via_port_start,
29260 };
29261
29262 -static struct ata_port_operations via_port_ops_noirq = {
29263 +static const struct ata_port_operations via_port_ops_noirq = {
29264 .inherits = &via_port_ops,
29265 .sff_data_xfer = ata_sff_data_xfer_noirq,
29266 };
29267 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29268 index 6d8619b..ad511c4 100644
29269 --- a/drivers/ata/pata_winbond.c
29270 +++ b/drivers/ata/pata_winbond.c
29271 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29272 ATA_PIO_SHT(DRV_NAME),
29273 };
29274
29275 -static struct ata_port_operations winbond_port_ops = {
29276 +static const struct ata_port_operations winbond_port_ops = {
29277 .inherits = &ata_sff_port_ops,
29278 .sff_data_xfer = winbond_data_xfer,
29279 .cable_detect = ata_cable_40wire,
29280 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29281 index 6c65b07..f996ec7 100644
29282 --- a/drivers/ata/pdc_adma.c
29283 +++ b/drivers/ata/pdc_adma.c
29284 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29285 .dma_boundary = ADMA_DMA_BOUNDARY,
29286 };
29287
29288 -static struct ata_port_operations adma_ata_ops = {
29289 +static const struct ata_port_operations adma_ata_ops = {
29290 .inherits = &ata_sff_port_ops,
29291
29292 .lost_interrupt = ATA_OP_NULL,
29293 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29294 index 172b57e..c49bc1e 100644
29295 --- a/drivers/ata/sata_fsl.c
29296 +++ b/drivers/ata/sata_fsl.c
29297 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29298 .dma_boundary = ATA_DMA_BOUNDARY,
29299 };
29300
29301 -static struct ata_port_operations sata_fsl_ops = {
29302 +static const struct ata_port_operations sata_fsl_ops = {
29303 .inherits = &sata_pmp_port_ops,
29304
29305 .qc_defer = ata_std_qc_defer,
29306 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29307 index 4406902..60603ef 100644
29308 --- a/drivers/ata/sata_inic162x.c
29309 +++ b/drivers/ata/sata_inic162x.c
29310 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29311 return 0;
29312 }
29313
29314 -static struct ata_port_operations inic_port_ops = {
29315 +static const struct ata_port_operations inic_port_ops = {
29316 .inherits = &sata_port_ops,
29317
29318 .check_atapi_dma = inic_check_atapi_dma,
29319 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29320 index cf41126..8107be6 100644
29321 --- a/drivers/ata/sata_mv.c
29322 +++ b/drivers/ata/sata_mv.c
29323 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29324 .dma_boundary = MV_DMA_BOUNDARY,
29325 };
29326
29327 -static struct ata_port_operations mv5_ops = {
29328 +static const struct ata_port_operations mv5_ops = {
29329 .inherits = &ata_sff_port_ops,
29330
29331 .lost_interrupt = ATA_OP_NULL,
29332 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29333 .port_stop = mv_port_stop,
29334 };
29335
29336 -static struct ata_port_operations mv6_ops = {
29337 +static const struct ata_port_operations mv6_ops = {
29338 .inherits = &mv5_ops,
29339 .dev_config = mv6_dev_config,
29340 .scr_read = mv_scr_read,
29341 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29342 .bmdma_status = mv_bmdma_status,
29343 };
29344
29345 -static struct ata_port_operations mv_iie_ops = {
29346 +static const struct ata_port_operations mv_iie_ops = {
29347 .inherits = &mv6_ops,
29348 .dev_config = ATA_OP_NULL,
29349 .qc_prep = mv_qc_prep_iie,
29350 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29351 index ae2297c..d5c9c33 100644
29352 --- a/drivers/ata/sata_nv.c
29353 +++ b/drivers/ata/sata_nv.c
29354 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29355 * cases. Define nv_hardreset() which only kicks in for post-boot
29356 * probing and use it for all variants.
29357 */
29358 -static struct ata_port_operations nv_generic_ops = {
29359 +static const struct ata_port_operations nv_generic_ops = {
29360 .inherits = &ata_bmdma_port_ops,
29361 .lost_interrupt = ATA_OP_NULL,
29362 .scr_read = nv_scr_read,
29363 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29364 .hardreset = nv_hardreset,
29365 };
29366
29367 -static struct ata_port_operations nv_nf2_ops = {
29368 +static const struct ata_port_operations nv_nf2_ops = {
29369 .inherits = &nv_generic_ops,
29370 .freeze = nv_nf2_freeze,
29371 .thaw = nv_nf2_thaw,
29372 };
29373
29374 -static struct ata_port_operations nv_ck804_ops = {
29375 +static const struct ata_port_operations nv_ck804_ops = {
29376 .inherits = &nv_generic_ops,
29377 .freeze = nv_ck804_freeze,
29378 .thaw = nv_ck804_thaw,
29379 .host_stop = nv_ck804_host_stop,
29380 };
29381
29382 -static struct ata_port_operations nv_adma_ops = {
29383 +static const struct ata_port_operations nv_adma_ops = {
29384 .inherits = &nv_ck804_ops,
29385
29386 .check_atapi_dma = nv_adma_check_atapi_dma,
29387 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29388 .host_stop = nv_adma_host_stop,
29389 };
29390
29391 -static struct ata_port_operations nv_swncq_ops = {
29392 +static const struct ata_port_operations nv_swncq_ops = {
29393 .inherits = &nv_generic_ops,
29394
29395 .qc_defer = ata_std_qc_defer,
29396 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29397 index 07d8d00..6cc70bb 100644
29398 --- a/drivers/ata/sata_promise.c
29399 +++ b/drivers/ata/sata_promise.c
29400 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29401 .error_handler = pdc_error_handler,
29402 };
29403
29404 -static struct ata_port_operations pdc_sata_ops = {
29405 +static const struct ata_port_operations pdc_sata_ops = {
29406 .inherits = &pdc_common_ops,
29407 .cable_detect = pdc_sata_cable_detect,
29408 .freeze = pdc_sata_freeze,
29409 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29410
29411 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29412 and ->freeze/thaw that ignore the hotplug controls. */
29413 -static struct ata_port_operations pdc_old_sata_ops = {
29414 +static const struct ata_port_operations pdc_old_sata_ops = {
29415 .inherits = &pdc_sata_ops,
29416 .freeze = pdc_freeze,
29417 .thaw = pdc_thaw,
29418 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29419 };
29420
29421 -static struct ata_port_operations pdc_pata_ops = {
29422 +static const struct ata_port_operations pdc_pata_ops = {
29423 .inherits = &pdc_common_ops,
29424 .cable_detect = pdc_pata_cable_detect,
29425 .freeze = pdc_freeze,
29426 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29427 index 326c0cf..36ecebe 100644
29428 --- a/drivers/ata/sata_qstor.c
29429 +++ b/drivers/ata/sata_qstor.c
29430 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29431 .dma_boundary = QS_DMA_BOUNDARY,
29432 };
29433
29434 -static struct ata_port_operations qs_ata_ops = {
29435 +static const struct ata_port_operations qs_ata_ops = {
29436 .inherits = &ata_sff_port_ops,
29437
29438 .check_atapi_dma = qs_check_atapi_dma,
29439 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29440 index 3cb69d5..0871d3c 100644
29441 --- a/drivers/ata/sata_sil.c
29442 +++ b/drivers/ata/sata_sil.c
29443 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29444 .sg_tablesize = ATA_MAX_PRD
29445 };
29446
29447 -static struct ata_port_operations sil_ops = {
29448 +static const struct ata_port_operations sil_ops = {
29449 .inherits = &ata_bmdma32_port_ops,
29450 .dev_config = sil_dev_config,
29451 .set_mode = sil_set_mode,
29452 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29453 index e6946fc..eddb794 100644
29454 --- a/drivers/ata/sata_sil24.c
29455 +++ b/drivers/ata/sata_sil24.c
29456 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29457 .dma_boundary = ATA_DMA_BOUNDARY,
29458 };
29459
29460 -static struct ata_port_operations sil24_ops = {
29461 +static const struct ata_port_operations sil24_ops = {
29462 .inherits = &sata_pmp_port_ops,
29463
29464 .qc_defer = sil24_qc_defer,
29465 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29466 index f8a91bf..9cb06b6 100644
29467 --- a/drivers/ata/sata_sis.c
29468 +++ b/drivers/ata/sata_sis.c
29469 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29470 ATA_BMDMA_SHT(DRV_NAME),
29471 };
29472
29473 -static struct ata_port_operations sis_ops = {
29474 +static const struct ata_port_operations sis_ops = {
29475 .inherits = &ata_bmdma_port_ops,
29476 .scr_read = sis_scr_read,
29477 .scr_write = sis_scr_write,
29478 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29479 index 7257f2d..d04c6f5 100644
29480 --- a/drivers/ata/sata_svw.c
29481 +++ b/drivers/ata/sata_svw.c
29482 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29483 };
29484
29485
29486 -static struct ata_port_operations k2_sata_ops = {
29487 +static const struct ata_port_operations k2_sata_ops = {
29488 .inherits = &ata_bmdma_port_ops,
29489 .sff_tf_load = k2_sata_tf_load,
29490 .sff_tf_read = k2_sata_tf_read,
29491 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29492 index bbcf970..cd0df0d 100644
29493 --- a/drivers/ata/sata_sx4.c
29494 +++ b/drivers/ata/sata_sx4.c
29495 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29496 };
29497
29498 /* TODO: inherit from base port_ops after converting to new EH */
29499 -static struct ata_port_operations pdc_20621_ops = {
29500 +static const struct ata_port_operations pdc_20621_ops = {
29501 .inherits = &ata_sff_port_ops,
29502
29503 .check_atapi_dma = pdc_check_atapi_dma,
29504 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29505 index e5bff47..089d859 100644
29506 --- a/drivers/ata/sata_uli.c
29507 +++ b/drivers/ata/sata_uli.c
29508 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29509 ATA_BMDMA_SHT(DRV_NAME),
29510 };
29511
29512 -static struct ata_port_operations uli_ops = {
29513 +static const struct ata_port_operations uli_ops = {
29514 .inherits = &ata_bmdma_port_ops,
29515 .scr_read = uli_scr_read,
29516 .scr_write = uli_scr_write,
29517 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29518 index f5dcca7..77b94eb 100644
29519 --- a/drivers/ata/sata_via.c
29520 +++ b/drivers/ata/sata_via.c
29521 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29522 ATA_BMDMA_SHT(DRV_NAME),
29523 };
29524
29525 -static struct ata_port_operations svia_base_ops = {
29526 +static const struct ata_port_operations svia_base_ops = {
29527 .inherits = &ata_bmdma_port_ops,
29528 .sff_tf_load = svia_tf_load,
29529 };
29530
29531 -static struct ata_port_operations vt6420_sata_ops = {
29532 +static const struct ata_port_operations vt6420_sata_ops = {
29533 .inherits = &svia_base_ops,
29534 .freeze = svia_noop_freeze,
29535 .prereset = vt6420_prereset,
29536 .bmdma_start = vt6420_bmdma_start,
29537 };
29538
29539 -static struct ata_port_operations vt6421_pata_ops = {
29540 +static const struct ata_port_operations vt6421_pata_ops = {
29541 .inherits = &svia_base_ops,
29542 .cable_detect = vt6421_pata_cable_detect,
29543 .set_piomode = vt6421_set_pio_mode,
29544 .set_dmamode = vt6421_set_dma_mode,
29545 };
29546
29547 -static struct ata_port_operations vt6421_sata_ops = {
29548 +static const struct ata_port_operations vt6421_sata_ops = {
29549 .inherits = &svia_base_ops,
29550 .scr_read = svia_scr_read,
29551 .scr_write = svia_scr_write,
29552 };
29553
29554 -static struct ata_port_operations vt8251_ops = {
29555 +static const struct ata_port_operations vt8251_ops = {
29556 .inherits = &svia_base_ops,
29557 .hardreset = sata_std_hardreset,
29558 .scr_read = vt8251_scr_read,
29559 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29560 index 8b2a278..51e65d3 100644
29561 --- a/drivers/ata/sata_vsc.c
29562 +++ b/drivers/ata/sata_vsc.c
29563 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29564 };
29565
29566
29567 -static struct ata_port_operations vsc_sata_ops = {
29568 +static const struct ata_port_operations vsc_sata_ops = {
29569 .inherits = &ata_bmdma_port_ops,
29570 /* The IRQ handling is not quite standard SFF behaviour so we
29571 cannot use the default lost interrupt handler */
29572 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29573 index 5effec6..7e4019a 100644
29574 --- a/drivers/atm/adummy.c
29575 +++ b/drivers/atm/adummy.c
29576 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29577 vcc->pop(vcc, skb);
29578 else
29579 dev_kfree_skb_any(skb);
29580 - atomic_inc(&vcc->stats->tx);
29581 + atomic_inc_unchecked(&vcc->stats->tx);
29582
29583 return 0;
29584 }
29585 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29586 index 66e1813..26a27c6 100644
29587 --- a/drivers/atm/ambassador.c
29588 +++ b/drivers/atm/ambassador.c
29589 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29590 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29591
29592 // VC layer stats
29593 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29594 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29595
29596 // free the descriptor
29597 kfree (tx_descr);
29598 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29599 dump_skb ("<<<", vc, skb);
29600
29601 // VC layer stats
29602 - atomic_inc(&atm_vcc->stats->rx);
29603 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29604 __net_timestamp(skb);
29605 // end of our responsability
29606 atm_vcc->push (atm_vcc, skb);
29607 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29608 } else {
29609 PRINTK (KERN_INFO, "dropped over-size frame");
29610 // should we count this?
29611 - atomic_inc(&atm_vcc->stats->rx_drop);
29612 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29613 }
29614
29615 } else {
29616 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29617 }
29618
29619 if (check_area (skb->data, skb->len)) {
29620 - atomic_inc(&atm_vcc->stats->tx_err);
29621 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29622 return -ENOMEM; // ?
29623 }
29624
29625 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29626 index 02ad83d..6daffeb 100644
29627 --- a/drivers/atm/atmtcp.c
29628 +++ b/drivers/atm/atmtcp.c
29629 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29630 if (vcc->pop) vcc->pop(vcc,skb);
29631 else dev_kfree_skb(skb);
29632 if (dev_data) return 0;
29633 - atomic_inc(&vcc->stats->tx_err);
29634 + atomic_inc_unchecked(&vcc->stats->tx_err);
29635 return -ENOLINK;
29636 }
29637 size = skb->len+sizeof(struct atmtcp_hdr);
29638 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29639 if (!new_skb) {
29640 if (vcc->pop) vcc->pop(vcc,skb);
29641 else dev_kfree_skb(skb);
29642 - atomic_inc(&vcc->stats->tx_err);
29643 + atomic_inc_unchecked(&vcc->stats->tx_err);
29644 return -ENOBUFS;
29645 }
29646 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29647 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29648 if (vcc->pop) vcc->pop(vcc,skb);
29649 else dev_kfree_skb(skb);
29650 out_vcc->push(out_vcc,new_skb);
29651 - atomic_inc(&vcc->stats->tx);
29652 - atomic_inc(&out_vcc->stats->rx);
29653 + atomic_inc_unchecked(&vcc->stats->tx);
29654 + atomic_inc_unchecked(&out_vcc->stats->rx);
29655 return 0;
29656 }
29657
29658 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29659 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29660 read_unlock(&vcc_sklist_lock);
29661 if (!out_vcc) {
29662 - atomic_inc(&vcc->stats->tx_err);
29663 + atomic_inc_unchecked(&vcc->stats->tx_err);
29664 goto done;
29665 }
29666 skb_pull(skb,sizeof(struct atmtcp_hdr));
29667 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29668 __net_timestamp(new_skb);
29669 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29670 out_vcc->push(out_vcc,new_skb);
29671 - atomic_inc(&vcc->stats->tx);
29672 - atomic_inc(&out_vcc->stats->rx);
29673 + atomic_inc_unchecked(&vcc->stats->tx);
29674 + atomic_inc_unchecked(&out_vcc->stats->rx);
29675 done:
29676 if (vcc->pop) vcc->pop(vcc,skb);
29677 else dev_kfree_skb(skb);
29678 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29679 index 0c30261..3da356e 100644
29680 --- a/drivers/atm/eni.c
29681 +++ b/drivers/atm/eni.c
29682 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29683 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29684 vcc->dev->number);
29685 length = 0;
29686 - atomic_inc(&vcc->stats->rx_err);
29687 + atomic_inc_unchecked(&vcc->stats->rx_err);
29688 }
29689 else {
29690 length = ATM_CELL_SIZE-1; /* no HEC */
29691 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29692 size);
29693 }
29694 eff = length = 0;
29695 - atomic_inc(&vcc->stats->rx_err);
29696 + atomic_inc_unchecked(&vcc->stats->rx_err);
29697 }
29698 else {
29699 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29700 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29701 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29702 vcc->dev->number,vcc->vci,length,size << 2,descr);
29703 length = eff = 0;
29704 - atomic_inc(&vcc->stats->rx_err);
29705 + atomic_inc_unchecked(&vcc->stats->rx_err);
29706 }
29707 }
29708 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29709 @@ -770,7 +770,7 @@ rx_dequeued++;
29710 vcc->push(vcc,skb);
29711 pushed++;
29712 }
29713 - atomic_inc(&vcc->stats->rx);
29714 + atomic_inc_unchecked(&vcc->stats->rx);
29715 }
29716 wake_up(&eni_dev->rx_wait);
29717 }
29718 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29719 PCI_DMA_TODEVICE);
29720 if (vcc->pop) vcc->pop(vcc,skb);
29721 else dev_kfree_skb_irq(skb);
29722 - atomic_inc(&vcc->stats->tx);
29723 + atomic_inc_unchecked(&vcc->stats->tx);
29724 wake_up(&eni_dev->tx_wait);
29725 dma_complete++;
29726 }
29727 @@ -1570,7 +1570,7 @@ tx_complete++;
29728 /*--------------------------------- entries ---------------------------------*/
29729
29730
29731 -static const char *media_name[] __devinitdata = {
29732 +static const char *media_name[] __devinitconst = {
29733 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29734 "UTP", "05?", "06?", "07?", /* 4- 7 */
29735 "TAXI","09?", "10?", "11?", /* 8-11 */
29736 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29737 index cd5049a..a51209f 100644
29738 --- a/drivers/atm/firestream.c
29739 +++ b/drivers/atm/firestream.c
29740 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29741 }
29742 }
29743
29744 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29745 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29746
29747 fs_dprintk (FS_DEBUG_TXMEM, "i");
29748 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29749 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29750 #endif
29751 skb_put (skb, qe->p1 & 0xffff);
29752 ATM_SKB(skb)->vcc = atm_vcc;
29753 - atomic_inc(&atm_vcc->stats->rx);
29754 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29755 __net_timestamp(skb);
29756 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29757 atm_vcc->push (atm_vcc, skb);
29758 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29759 kfree (pe);
29760 }
29761 if (atm_vcc)
29762 - atomic_inc(&atm_vcc->stats->rx_drop);
29763 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29764 break;
29765 case 0x1f: /* Reassembly abort: no buffers. */
29766 /* Silently increment error counter. */
29767 if (atm_vcc)
29768 - atomic_inc(&atm_vcc->stats->rx_drop);
29769 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29770 break;
29771 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29772 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29773 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29774 index f766cc4..a34002e 100644
29775 --- a/drivers/atm/fore200e.c
29776 +++ b/drivers/atm/fore200e.c
29777 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29778 #endif
29779 /* check error condition */
29780 if (*entry->status & STATUS_ERROR)
29781 - atomic_inc(&vcc->stats->tx_err);
29782 + atomic_inc_unchecked(&vcc->stats->tx_err);
29783 else
29784 - atomic_inc(&vcc->stats->tx);
29785 + atomic_inc_unchecked(&vcc->stats->tx);
29786 }
29787 }
29788
29789 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29790 if (skb == NULL) {
29791 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29792
29793 - atomic_inc(&vcc->stats->rx_drop);
29794 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29795 return -ENOMEM;
29796 }
29797
29798 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29799
29800 dev_kfree_skb_any(skb);
29801
29802 - atomic_inc(&vcc->stats->rx_drop);
29803 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29804 return -ENOMEM;
29805 }
29806
29807 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29808
29809 vcc->push(vcc, skb);
29810 - atomic_inc(&vcc->stats->rx);
29811 + atomic_inc_unchecked(&vcc->stats->rx);
29812
29813 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29814
29815 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29816 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29817 fore200e->atm_dev->number,
29818 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29819 - atomic_inc(&vcc->stats->rx_err);
29820 + atomic_inc_unchecked(&vcc->stats->rx_err);
29821 }
29822 }
29823
29824 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29825 goto retry_here;
29826 }
29827
29828 - atomic_inc(&vcc->stats->tx_err);
29829 + atomic_inc_unchecked(&vcc->stats->tx_err);
29830
29831 fore200e->tx_sat++;
29832 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29833 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29834 index 7066703..2b130de 100644
29835 --- a/drivers/atm/he.c
29836 +++ b/drivers/atm/he.c
29837 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29838
29839 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29840 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29841 - atomic_inc(&vcc->stats->rx_drop);
29842 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29843 goto return_host_buffers;
29844 }
29845
29846 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29847 RBRQ_LEN_ERR(he_dev->rbrq_head)
29848 ? "LEN_ERR" : "",
29849 vcc->vpi, vcc->vci);
29850 - atomic_inc(&vcc->stats->rx_err);
29851 + atomic_inc_unchecked(&vcc->stats->rx_err);
29852 goto return_host_buffers;
29853 }
29854
29855 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29856 vcc->push(vcc, skb);
29857 spin_lock(&he_dev->global_lock);
29858
29859 - atomic_inc(&vcc->stats->rx);
29860 + atomic_inc_unchecked(&vcc->stats->rx);
29861
29862 return_host_buffers:
29863 ++pdus_assembled;
29864 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29865 tpd->vcc->pop(tpd->vcc, tpd->skb);
29866 else
29867 dev_kfree_skb_any(tpd->skb);
29868 - atomic_inc(&tpd->vcc->stats->tx_err);
29869 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29870 }
29871 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29872 return;
29873 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29874 vcc->pop(vcc, skb);
29875 else
29876 dev_kfree_skb_any(skb);
29877 - atomic_inc(&vcc->stats->tx_err);
29878 + atomic_inc_unchecked(&vcc->stats->tx_err);
29879 return -EINVAL;
29880 }
29881
29882 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29883 vcc->pop(vcc, skb);
29884 else
29885 dev_kfree_skb_any(skb);
29886 - atomic_inc(&vcc->stats->tx_err);
29887 + atomic_inc_unchecked(&vcc->stats->tx_err);
29888 return -EINVAL;
29889 }
29890 #endif
29891 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29892 vcc->pop(vcc, skb);
29893 else
29894 dev_kfree_skb_any(skb);
29895 - atomic_inc(&vcc->stats->tx_err);
29896 + atomic_inc_unchecked(&vcc->stats->tx_err);
29897 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29898 return -ENOMEM;
29899 }
29900 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29901 vcc->pop(vcc, skb);
29902 else
29903 dev_kfree_skb_any(skb);
29904 - atomic_inc(&vcc->stats->tx_err);
29905 + atomic_inc_unchecked(&vcc->stats->tx_err);
29906 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29907 return -ENOMEM;
29908 }
29909 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29910 __enqueue_tpd(he_dev, tpd, cid);
29911 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29912
29913 - atomic_inc(&vcc->stats->tx);
29914 + atomic_inc_unchecked(&vcc->stats->tx);
29915
29916 return 0;
29917 }
29918 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29919 index 4e49021..01b1512 100644
29920 --- a/drivers/atm/horizon.c
29921 +++ b/drivers/atm/horizon.c
29922 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29923 {
29924 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29925 // VC layer stats
29926 - atomic_inc(&vcc->stats->rx);
29927 + atomic_inc_unchecked(&vcc->stats->rx);
29928 __net_timestamp(skb);
29929 // end of our responsability
29930 vcc->push (vcc, skb);
29931 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29932 dev->tx_iovec = NULL;
29933
29934 // VC layer stats
29935 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29936 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29937
29938 // free the skb
29939 hrz_kfree_skb (skb);
29940 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29941 index e33ae00..9deb4ab 100644
29942 --- a/drivers/atm/idt77252.c
29943 +++ b/drivers/atm/idt77252.c
29944 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29945 else
29946 dev_kfree_skb(skb);
29947
29948 - atomic_inc(&vcc->stats->tx);
29949 + atomic_inc_unchecked(&vcc->stats->tx);
29950 }
29951
29952 atomic_dec(&scq->used);
29953 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29954 if ((sb = dev_alloc_skb(64)) == NULL) {
29955 printk("%s: Can't allocate buffers for aal0.\n",
29956 card->name);
29957 - atomic_add(i, &vcc->stats->rx_drop);
29958 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29959 break;
29960 }
29961 if (!atm_charge(vcc, sb->truesize)) {
29962 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29963 card->name);
29964 - atomic_add(i - 1, &vcc->stats->rx_drop);
29965 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29966 dev_kfree_skb(sb);
29967 break;
29968 }
29969 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29970 ATM_SKB(sb)->vcc = vcc;
29971 __net_timestamp(sb);
29972 vcc->push(vcc, sb);
29973 - atomic_inc(&vcc->stats->rx);
29974 + atomic_inc_unchecked(&vcc->stats->rx);
29975
29976 cell += ATM_CELL_PAYLOAD;
29977 }
29978 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29979 "(CDC: %08x)\n",
29980 card->name, len, rpp->len, readl(SAR_REG_CDC));
29981 recycle_rx_pool_skb(card, rpp);
29982 - atomic_inc(&vcc->stats->rx_err);
29983 + atomic_inc_unchecked(&vcc->stats->rx_err);
29984 return;
29985 }
29986 if (stat & SAR_RSQE_CRC) {
29987 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29988 recycle_rx_pool_skb(card, rpp);
29989 - atomic_inc(&vcc->stats->rx_err);
29990 + atomic_inc_unchecked(&vcc->stats->rx_err);
29991 return;
29992 }
29993 if (skb_queue_len(&rpp->queue) > 1) {
29994 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29995 RXPRINTK("%s: Can't alloc RX skb.\n",
29996 card->name);
29997 recycle_rx_pool_skb(card, rpp);
29998 - atomic_inc(&vcc->stats->rx_err);
29999 + atomic_inc_unchecked(&vcc->stats->rx_err);
30000 return;
30001 }
30002 if (!atm_charge(vcc, skb->truesize)) {
30003 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30004 __net_timestamp(skb);
30005
30006 vcc->push(vcc, skb);
30007 - atomic_inc(&vcc->stats->rx);
30008 + atomic_inc_unchecked(&vcc->stats->rx);
30009
30010 return;
30011 }
30012 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30013 __net_timestamp(skb);
30014
30015 vcc->push(vcc, skb);
30016 - atomic_inc(&vcc->stats->rx);
30017 + atomic_inc_unchecked(&vcc->stats->rx);
30018
30019 if (skb->truesize > SAR_FB_SIZE_3)
30020 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
30021 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
30022 if (vcc->qos.aal != ATM_AAL0) {
30023 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
30024 card->name, vpi, vci);
30025 - atomic_inc(&vcc->stats->rx_drop);
30026 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30027 goto drop;
30028 }
30029
30030 if ((sb = dev_alloc_skb(64)) == NULL) {
30031 printk("%s: Can't allocate buffers for AAL0.\n",
30032 card->name);
30033 - atomic_inc(&vcc->stats->rx_err);
30034 + atomic_inc_unchecked(&vcc->stats->rx_err);
30035 goto drop;
30036 }
30037
30038 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30039 ATM_SKB(sb)->vcc = vcc;
30040 __net_timestamp(sb);
30041 vcc->push(vcc, sb);
30042 - atomic_inc(&vcc->stats->rx);
30043 + atomic_inc_unchecked(&vcc->stats->rx);
30044
30045 drop:
30046 skb_pull(queue, 64);
30047 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30048
30049 if (vc == NULL) {
30050 printk("%s: NULL connection in send().\n", card->name);
30051 - atomic_inc(&vcc->stats->tx_err);
30052 + atomic_inc_unchecked(&vcc->stats->tx_err);
30053 dev_kfree_skb(skb);
30054 return -EINVAL;
30055 }
30056 if (!test_bit(VCF_TX, &vc->flags)) {
30057 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30058 - atomic_inc(&vcc->stats->tx_err);
30059 + atomic_inc_unchecked(&vcc->stats->tx_err);
30060 dev_kfree_skb(skb);
30061 return -EINVAL;
30062 }
30063 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30064 break;
30065 default:
30066 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30067 - atomic_inc(&vcc->stats->tx_err);
30068 + atomic_inc_unchecked(&vcc->stats->tx_err);
30069 dev_kfree_skb(skb);
30070 return -EINVAL;
30071 }
30072
30073 if (skb_shinfo(skb)->nr_frags != 0) {
30074 printk("%s: No scatter-gather yet.\n", card->name);
30075 - atomic_inc(&vcc->stats->tx_err);
30076 + atomic_inc_unchecked(&vcc->stats->tx_err);
30077 dev_kfree_skb(skb);
30078 return -EINVAL;
30079 }
30080 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30081
30082 err = queue_skb(card, vc, skb, oam);
30083 if (err) {
30084 - atomic_inc(&vcc->stats->tx_err);
30085 + atomic_inc_unchecked(&vcc->stats->tx_err);
30086 dev_kfree_skb(skb);
30087 return err;
30088 }
30089 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30090 skb = dev_alloc_skb(64);
30091 if (!skb) {
30092 printk("%s: Out of memory in send_oam().\n", card->name);
30093 - atomic_inc(&vcc->stats->tx_err);
30094 + atomic_inc_unchecked(&vcc->stats->tx_err);
30095 return -ENOMEM;
30096 }
30097 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30098 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30099 index b2c1b37..faa672b 100644
30100 --- a/drivers/atm/iphase.c
30101 +++ b/drivers/atm/iphase.c
30102 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30103 status = (u_short) (buf_desc_ptr->desc_mode);
30104 if (status & (RX_CER | RX_PTE | RX_OFL))
30105 {
30106 - atomic_inc(&vcc->stats->rx_err);
30107 + atomic_inc_unchecked(&vcc->stats->rx_err);
30108 IF_ERR(printk("IA: bad packet, dropping it");)
30109 if (status & RX_CER) {
30110 IF_ERR(printk(" cause: packet CRC error\n");)
30111 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30112 len = dma_addr - buf_addr;
30113 if (len > iadev->rx_buf_sz) {
30114 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30115 - atomic_inc(&vcc->stats->rx_err);
30116 + atomic_inc_unchecked(&vcc->stats->rx_err);
30117 goto out_free_desc;
30118 }
30119
30120 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30121 ia_vcc = INPH_IA_VCC(vcc);
30122 if (ia_vcc == NULL)
30123 {
30124 - atomic_inc(&vcc->stats->rx_err);
30125 + atomic_inc_unchecked(&vcc->stats->rx_err);
30126 dev_kfree_skb_any(skb);
30127 atm_return(vcc, atm_guess_pdu2truesize(len));
30128 goto INCR_DLE;
30129 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30130 if ((length > iadev->rx_buf_sz) || (length >
30131 (skb->len - sizeof(struct cpcs_trailer))))
30132 {
30133 - atomic_inc(&vcc->stats->rx_err);
30134 + atomic_inc_unchecked(&vcc->stats->rx_err);
30135 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30136 length, skb->len);)
30137 dev_kfree_skb_any(skb);
30138 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30139
30140 IF_RX(printk("rx_dle_intr: skb push");)
30141 vcc->push(vcc,skb);
30142 - atomic_inc(&vcc->stats->rx);
30143 + atomic_inc_unchecked(&vcc->stats->rx);
30144 iadev->rx_pkt_cnt++;
30145 }
30146 INCR_DLE:
30147 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30148 {
30149 struct k_sonet_stats *stats;
30150 stats = &PRIV(_ia_dev[board])->sonet_stats;
30151 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30152 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30153 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30154 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30155 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30156 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30157 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30158 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30159 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30160 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30161 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30162 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30163 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30164 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30165 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30166 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30167 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30168 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30169 }
30170 ia_cmds.status = 0;
30171 break;
30172 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30173 if ((desc == 0) || (desc > iadev->num_tx_desc))
30174 {
30175 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30176 - atomic_inc(&vcc->stats->tx);
30177 + atomic_inc_unchecked(&vcc->stats->tx);
30178 if (vcc->pop)
30179 vcc->pop(vcc, skb);
30180 else
30181 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30182 ATM_DESC(skb) = vcc->vci;
30183 skb_queue_tail(&iadev->tx_dma_q, skb);
30184
30185 - atomic_inc(&vcc->stats->tx);
30186 + atomic_inc_unchecked(&vcc->stats->tx);
30187 iadev->tx_pkt_cnt++;
30188 /* Increment transaction counter */
30189 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30190
30191 #if 0
30192 /* add flow control logic */
30193 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30194 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30195 if (iavcc->vc_desc_cnt > 10) {
30196 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30197 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30198 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30199 index cf97c34..8d30655 100644
30200 --- a/drivers/atm/lanai.c
30201 +++ b/drivers/atm/lanai.c
30202 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30203 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30204 lanai_endtx(lanai, lvcc);
30205 lanai_free_skb(lvcc->tx.atmvcc, skb);
30206 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30207 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30208 }
30209
30210 /* Try to fill the buffer - don't call unless there is backlog */
30211 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30212 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30213 __net_timestamp(skb);
30214 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30215 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30216 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30217 out:
30218 lvcc->rx.buf.ptr = end;
30219 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30220 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30221 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30222 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30223 lanai->stats.service_rxnotaal5++;
30224 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30225 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30226 return 0;
30227 }
30228 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30229 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30230 int bytes;
30231 read_unlock(&vcc_sklist_lock);
30232 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30233 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30234 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30235 lvcc->stats.x.aal5.service_trash++;
30236 bytes = (SERVICE_GET_END(s) * 16) -
30237 (((unsigned long) lvcc->rx.buf.ptr) -
30238 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30239 }
30240 if (s & SERVICE_STREAM) {
30241 read_unlock(&vcc_sklist_lock);
30242 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30243 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30244 lvcc->stats.x.aal5.service_stream++;
30245 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30246 "PDU on VCI %d!\n", lanai->number, vci);
30247 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30248 return 0;
30249 }
30250 DPRINTK("got rx crc error on vci %d\n", vci);
30251 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30252 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30253 lvcc->stats.x.aal5.service_rxcrc++;
30254 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30255 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30256 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30257 index 3da804b..d3b0eed 100644
30258 --- a/drivers/atm/nicstar.c
30259 +++ b/drivers/atm/nicstar.c
30260 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30261 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30262 {
30263 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30264 - atomic_inc(&vcc->stats->tx_err);
30265 + atomic_inc_unchecked(&vcc->stats->tx_err);
30266 dev_kfree_skb_any(skb);
30267 return -EINVAL;
30268 }
30269 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30270 if (!vc->tx)
30271 {
30272 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30273 - atomic_inc(&vcc->stats->tx_err);
30274 + atomic_inc_unchecked(&vcc->stats->tx_err);
30275 dev_kfree_skb_any(skb);
30276 return -EINVAL;
30277 }
30278 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30279 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30280 {
30281 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30282 - atomic_inc(&vcc->stats->tx_err);
30283 + atomic_inc_unchecked(&vcc->stats->tx_err);
30284 dev_kfree_skb_any(skb);
30285 return -EINVAL;
30286 }
30287 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30288 if (skb_shinfo(skb)->nr_frags != 0)
30289 {
30290 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30291 - atomic_inc(&vcc->stats->tx_err);
30292 + atomic_inc_unchecked(&vcc->stats->tx_err);
30293 dev_kfree_skb_any(skb);
30294 return -EINVAL;
30295 }
30296 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30297
30298 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30299 {
30300 - atomic_inc(&vcc->stats->tx_err);
30301 + atomic_inc_unchecked(&vcc->stats->tx_err);
30302 dev_kfree_skb_any(skb);
30303 return -EIO;
30304 }
30305 - atomic_inc(&vcc->stats->tx);
30306 + atomic_inc_unchecked(&vcc->stats->tx);
30307
30308 return 0;
30309 }
30310 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30311 {
30312 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30313 card->index);
30314 - atomic_add(i,&vcc->stats->rx_drop);
30315 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30316 break;
30317 }
30318 if (!atm_charge(vcc, sb->truesize))
30319 {
30320 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30321 card->index);
30322 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30323 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30324 dev_kfree_skb_any(sb);
30325 break;
30326 }
30327 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30328 ATM_SKB(sb)->vcc = vcc;
30329 __net_timestamp(sb);
30330 vcc->push(vcc, sb);
30331 - atomic_inc(&vcc->stats->rx);
30332 + atomic_inc_unchecked(&vcc->stats->rx);
30333 cell += ATM_CELL_PAYLOAD;
30334 }
30335
30336 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30337 if (iovb == NULL)
30338 {
30339 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30340 - atomic_inc(&vcc->stats->rx_drop);
30341 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30342 recycle_rx_buf(card, skb);
30343 return;
30344 }
30345 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30346 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30347 {
30348 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30349 - atomic_inc(&vcc->stats->rx_err);
30350 + atomic_inc_unchecked(&vcc->stats->rx_err);
30351 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30352 NS_SKB(iovb)->iovcnt = 0;
30353 iovb->len = 0;
30354 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30355 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30356 card->index);
30357 which_list(card, skb);
30358 - atomic_inc(&vcc->stats->rx_err);
30359 + atomic_inc_unchecked(&vcc->stats->rx_err);
30360 recycle_rx_buf(card, skb);
30361 vc->rx_iov = NULL;
30362 recycle_iov_buf(card, iovb);
30363 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30364 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30365 card->index);
30366 which_list(card, skb);
30367 - atomic_inc(&vcc->stats->rx_err);
30368 + atomic_inc_unchecked(&vcc->stats->rx_err);
30369 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30370 NS_SKB(iovb)->iovcnt);
30371 vc->rx_iov = NULL;
30372 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30373 printk(" - PDU size mismatch.\n");
30374 else
30375 printk(".\n");
30376 - atomic_inc(&vcc->stats->rx_err);
30377 + atomic_inc_unchecked(&vcc->stats->rx_err);
30378 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30379 NS_SKB(iovb)->iovcnt);
30380 vc->rx_iov = NULL;
30381 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30382 if (!atm_charge(vcc, skb->truesize))
30383 {
30384 push_rxbufs(card, skb);
30385 - atomic_inc(&vcc->stats->rx_drop);
30386 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30387 }
30388 else
30389 {
30390 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30391 ATM_SKB(skb)->vcc = vcc;
30392 __net_timestamp(skb);
30393 vcc->push(vcc, skb);
30394 - atomic_inc(&vcc->stats->rx);
30395 + atomic_inc_unchecked(&vcc->stats->rx);
30396 }
30397 }
30398 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30399 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30400 if (!atm_charge(vcc, sb->truesize))
30401 {
30402 push_rxbufs(card, sb);
30403 - atomic_inc(&vcc->stats->rx_drop);
30404 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30405 }
30406 else
30407 {
30408 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30409 ATM_SKB(sb)->vcc = vcc;
30410 __net_timestamp(sb);
30411 vcc->push(vcc, sb);
30412 - atomic_inc(&vcc->stats->rx);
30413 + atomic_inc_unchecked(&vcc->stats->rx);
30414 }
30415
30416 push_rxbufs(card, skb);
30417 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30418 if (!atm_charge(vcc, skb->truesize))
30419 {
30420 push_rxbufs(card, skb);
30421 - atomic_inc(&vcc->stats->rx_drop);
30422 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30423 }
30424 else
30425 {
30426 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30427 ATM_SKB(skb)->vcc = vcc;
30428 __net_timestamp(skb);
30429 vcc->push(vcc, skb);
30430 - atomic_inc(&vcc->stats->rx);
30431 + atomic_inc_unchecked(&vcc->stats->rx);
30432 }
30433
30434 push_rxbufs(card, sb);
30435 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30436 if (hb == NULL)
30437 {
30438 printk("nicstar%d: Out of huge buffers.\n", card->index);
30439 - atomic_inc(&vcc->stats->rx_drop);
30440 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30441 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30442 NS_SKB(iovb)->iovcnt);
30443 vc->rx_iov = NULL;
30444 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30445 }
30446 else
30447 dev_kfree_skb_any(hb);
30448 - atomic_inc(&vcc->stats->rx_drop);
30449 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30450 }
30451 else
30452 {
30453 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30454 #endif /* NS_USE_DESTRUCTORS */
30455 __net_timestamp(hb);
30456 vcc->push(vcc, hb);
30457 - atomic_inc(&vcc->stats->rx);
30458 + atomic_inc_unchecked(&vcc->stats->rx);
30459 }
30460 }
30461
30462 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30463 index 84c93ff..e6ed269 100644
30464 --- a/drivers/atm/solos-pci.c
30465 +++ b/drivers/atm/solos-pci.c
30466 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30467 }
30468 atm_charge(vcc, skb->truesize);
30469 vcc->push(vcc, skb);
30470 - atomic_inc(&vcc->stats->rx);
30471 + atomic_inc_unchecked(&vcc->stats->rx);
30472 break;
30473
30474 case PKT_STATUS:
30475 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30476 char msg[500];
30477 char item[10];
30478
30479 + pax_track_stack();
30480 +
30481 len = buf->len;
30482 for (i = 0; i < len; i++){
30483 if(i % 8 == 0)
30484 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30485 vcc = SKB_CB(oldskb)->vcc;
30486
30487 if (vcc) {
30488 - atomic_inc(&vcc->stats->tx);
30489 + atomic_inc_unchecked(&vcc->stats->tx);
30490 solos_pop(vcc, oldskb);
30491 } else
30492 dev_kfree_skb_irq(oldskb);
30493 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30494 index 6dd3f59..ee377f3 100644
30495 --- a/drivers/atm/suni.c
30496 +++ b/drivers/atm/suni.c
30497 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30498
30499
30500 #define ADD_LIMITED(s,v) \
30501 - atomic_add((v),&stats->s); \
30502 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30503 + atomic_add_unchecked((v),&stats->s); \
30504 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30505
30506
30507 static void suni_hz(unsigned long from_timer)
30508 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30509 index fc8cb07..4a80e53 100644
30510 --- a/drivers/atm/uPD98402.c
30511 +++ b/drivers/atm/uPD98402.c
30512 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30513 struct sonet_stats tmp;
30514 int error = 0;
30515
30516 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30517 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30518 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30519 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30520 if (zero && !error) {
30521 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30522
30523
30524 #define ADD_LIMITED(s,v) \
30525 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30526 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30527 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30528 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30529 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30530 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30531
30532
30533 static void stat_event(struct atm_dev *dev)
30534 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30535 if (reason & uPD98402_INT_PFM) stat_event(dev);
30536 if (reason & uPD98402_INT_PCO) {
30537 (void) GET(PCOCR); /* clear interrupt cause */
30538 - atomic_add(GET(HECCT),
30539 + atomic_add_unchecked(GET(HECCT),
30540 &PRIV(dev)->sonet_stats.uncorr_hcs);
30541 }
30542 if ((reason & uPD98402_INT_RFO) &&
30543 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30544 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30545 uPD98402_INT_LOS),PIMR); /* enable them */
30546 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30547 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30548 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30549 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30550 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30551 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30552 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30553 return 0;
30554 }
30555
30556 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30557 index 2e9635b..32927b4 100644
30558 --- a/drivers/atm/zatm.c
30559 +++ b/drivers/atm/zatm.c
30560 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30561 }
30562 if (!size) {
30563 dev_kfree_skb_irq(skb);
30564 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30565 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30566 continue;
30567 }
30568 if (!atm_charge(vcc,skb->truesize)) {
30569 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30570 skb->len = size;
30571 ATM_SKB(skb)->vcc = vcc;
30572 vcc->push(vcc,skb);
30573 - atomic_inc(&vcc->stats->rx);
30574 + atomic_inc_unchecked(&vcc->stats->rx);
30575 }
30576 zout(pos & 0xffff,MTA(mbx));
30577 #if 0 /* probably a stupid idea */
30578 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30579 skb_queue_head(&zatm_vcc->backlog,skb);
30580 break;
30581 }
30582 - atomic_inc(&vcc->stats->tx);
30583 + atomic_inc_unchecked(&vcc->stats->tx);
30584 wake_up(&zatm_vcc->tx_wait);
30585 }
30586
30587 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30588 index 63c143e..fece183 100644
30589 --- a/drivers/base/bus.c
30590 +++ b/drivers/base/bus.c
30591 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30592 return ret;
30593 }
30594
30595 -static struct sysfs_ops driver_sysfs_ops = {
30596 +static const struct sysfs_ops driver_sysfs_ops = {
30597 .show = drv_attr_show,
30598 .store = drv_attr_store,
30599 };
30600 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30601 return ret;
30602 }
30603
30604 -static struct sysfs_ops bus_sysfs_ops = {
30605 +static const struct sysfs_ops bus_sysfs_ops = {
30606 .show = bus_attr_show,
30607 .store = bus_attr_store,
30608 };
30609 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30610 return 0;
30611 }
30612
30613 -static struct kset_uevent_ops bus_uevent_ops = {
30614 +static const struct kset_uevent_ops bus_uevent_ops = {
30615 .filter = bus_uevent_filter,
30616 };
30617
30618 diff --git a/drivers/base/class.c b/drivers/base/class.c
30619 index 6e2c3b0..cb61871 100644
30620 --- a/drivers/base/class.c
30621 +++ b/drivers/base/class.c
30622 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30623 kfree(cp);
30624 }
30625
30626 -static struct sysfs_ops class_sysfs_ops = {
30627 +static const struct sysfs_ops class_sysfs_ops = {
30628 .show = class_attr_show,
30629 .store = class_attr_store,
30630 };
30631 diff --git a/drivers/base/core.c b/drivers/base/core.c
30632 index f33d768..a9358d0 100644
30633 --- a/drivers/base/core.c
30634 +++ b/drivers/base/core.c
30635 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30636 return ret;
30637 }
30638
30639 -static struct sysfs_ops dev_sysfs_ops = {
30640 +static const struct sysfs_ops dev_sysfs_ops = {
30641 .show = dev_attr_show,
30642 .store = dev_attr_store,
30643 };
30644 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30645 return retval;
30646 }
30647
30648 -static struct kset_uevent_ops device_uevent_ops = {
30649 +static const struct kset_uevent_ops device_uevent_ops = {
30650 .filter = dev_uevent_filter,
30651 .name = dev_uevent_name,
30652 .uevent = dev_uevent,
30653 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30654 index 989429c..2272b00 100644
30655 --- a/drivers/base/memory.c
30656 +++ b/drivers/base/memory.c
30657 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30658 return retval;
30659 }
30660
30661 -static struct kset_uevent_ops memory_uevent_ops = {
30662 +static const struct kset_uevent_ops memory_uevent_ops = {
30663 .name = memory_uevent_name,
30664 .uevent = memory_uevent,
30665 };
30666 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30667 index 3f202f7..61c4a6f 100644
30668 --- a/drivers/base/sys.c
30669 +++ b/drivers/base/sys.c
30670 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30671 return -EIO;
30672 }
30673
30674 -static struct sysfs_ops sysfs_ops = {
30675 +static const struct sysfs_ops sysfs_ops = {
30676 .show = sysdev_show,
30677 .store = sysdev_store,
30678 };
30679 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30680 return -EIO;
30681 }
30682
30683 -static struct sysfs_ops sysfs_class_ops = {
30684 +static const struct sysfs_ops sysfs_class_ops = {
30685 .show = sysdev_class_show,
30686 .store = sysdev_class_store,
30687 };
30688 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30689 index eb4fa19..1954777 100644
30690 --- a/drivers/block/DAC960.c
30691 +++ b/drivers/block/DAC960.c
30692 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30693 unsigned long flags;
30694 int Channel, TargetID;
30695
30696 + pax_track_stack();
30697 +
30698 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30699 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30700 sizeof(DAC960_SCSI_Inquiry_T) +
30701 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30702 index 68b90d9..7e2e3f3 100644
30703 --- a/drivers/block/cciss.c
30704 +++ b/drivers/block/cciss.c
30705 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30706 int err;
30707 u32 cp;
30708
30709 + memset(&arg64, 0, sizeof(arg64));
30710 +
30711 err = 0;
30712 err |=
30713 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30714 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30715 /* Wait (up to 20 seconds) for a command to complete */
30716
30717 for (i = 20 * HZ; i > 0; i--) {
30718 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30719 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30720 if (done == FIFO_EMPTY)
30721 schedule_timeout_uninterruptible(1);
30722 else
30723 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30724 resend_cmd1:
30725
30726 /* Disable interrupt on the board. */
30727 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30728 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30729
30730 /* Make sure there is room in the command FIFO */
30731 /* Actually it should be completely empty at this time */
30732 @@ -2884,13 +2886,13 @@ resend_cmd1:
30733 /* tape side of the driver. */
30734 for (i = 200000; i > 0; i--) {
30735 /* if fifo isn't full go */
30736 - if (!(h->access.fifo_full(h)))
30737 + if (!(h->access->fifo_full(h)))
30738 break;
30739 udelay(10);
30740 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30741 " waiting!\n", h->ctlr);
30742 }
30743 - h->access.submit_command(h, c); /* Send the cmd */
30744 + h->access->submit_command(h, c); /* Send the cmd */
30745 do {
30746 complete = pollcomplete(h->ctlr);
30747
30748 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30749 while (!hlist_empty(&h->reqQ)) {
30750 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30751 /* can't do anything if fifo is full */
30752 - if ((h->access.fifo_full(h))) {
30753 + if ((h->access->fifo_full(h))) {
30754 printk(KERN_WARNING "cciss: fifo full\n");
30755 break;
30756 }
30757 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30758 h->Qdepth--;
30759
30760 /* Tell the controller execute command */
30761 - h->access.submit_command(h, c);
30762 + h->access->submit_command(h, c);
30763
30764 /* Put job onto the completed Q */
30765 addQ(&h->cmpQ, c);
30766 @@ -3393,17 +3395,17 @@ startio:
30767
30768 static inline unsigned long get_next_completion(ctlr_info_t *h)
30769 {
30770 - return h->access.command_completed(h);
30771 + return h->access->command_completed(h);
30772 }
30773
30774 static inline int interrupt_pending(ctlr_info_t *h)
30775 {
30776 - return h->access.intr_pending(h);
30777 + return h->access->intr_pending(h);
30778 }
30779
30780 static inline long interrupt_not_for_us(ctlr_info_t *h)
30781 {
30782 - return (((h->access.intr_pending(h) == 0) ||
30783 + return (((h->access->intr_pending(h) == 0) ||
30784 (h->interrupts_enabled == 0)));
30785 }
30786
30787 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30788 */
30789 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30790 c->product_name = products[prod_index].product_name;
30791 - c->access = *(products[prod_index].access);
30792 + c->access = products[prod_index].access;
30793 c->nr_cmds = c->max_commands - 4;
30794 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30795 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30796 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30797 }
30798
30799 /* make sure the board interrupts are off */
30800 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30801 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30802 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30803 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30804 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30805 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30806 cciss_scsi_setup(i);
30807
30808 /* Turn the interrupts on so we can service requests */
30809 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30810 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30811
30812 /* Get the firmware version */
30813 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30814 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30815 index 04d6bf8..36e712d 100644
30816 --- a/drivers/block/cciss.h
30817 +++ b/drivers/block/cciss.h
30818 @@ -90,7 +90,7 @@ struct ctlr_info
30819 // information about each logical volume
30820 drive_info_struct *drv[CISS_MAX_LUN];
30821
30822 - struct access_method access;
30823 + struct access_method *access;
30824
30825 /* queue and queue Info */
30826 struct hlist_head reqQ;
30827 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30828 index 6422651..bb1bdef 100644
30829 --- a/drivers/block/cpqarray.c
30830 +++ b/drivers/block/cpqarray.c
30831 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30832 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30833 goto Enomem4;
30834 }
30835 - hba[i]->access.set_intr_mask(hba[i], 0);
30836 + hba[i]->access->set_intr_mask(hba[i], 0);
30837 if (request_irq(hba[i]->intr, do_ida_intr,
30838 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30839 {
30840 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30841 add_timer(&hba[i]->timer);
30842
30843 /* Enable IRQ now that spinlock and rate limit timer are set up */
30844 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30845 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30846
30847 for(j=0; j<NWD; j++) {
30848 struct gendisk *disk = ida_gendisk[i][j];
30849 @@ -695,7 +695,7 @@ DBGINFO(
30850 for(i=0; i<NR_PRODUCTS; i++) {
30851 if (board_id == products[i].board_id) {
30852 c->product_name = products[i].product_name;
30853 - c->access = *(products[i].access);
30854 + c->access = products[i].access;
30855 break;
30856 }
30857 }
30858 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30859 hba[ctlr]->intr = intr;
30860 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30861 hba[ctlr]->product_name = products[j].product_name;
30862 - hba[ctlr]->access = *(products[j].access);
30863 + hba[ctlr]->access = products[j].access;
30864 hba[ctlr]->ctlr = ctlr;
30865 hba[ctlr]->board_id = board_id;
30866 hba[ctlr]->pci_dev = NULL; /* not PCI */
30867 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30868 struct scatterlist tmp_sg[SG_MAX];
30869 int i, dir, seg;
30870
30871 + pax_track_stack();
30872 +
30873 if (blk_queue_plugged(q))
30874 goto startio;
30875
30876 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30877
30878 while((c = h->reqQ) != NULL) {
30879 /* Can't do anything if we're busy */
30880 - if (h->access.fifo_full(h) == 0)
30881 + if (h->access->fifo_full(h) == 0)
30882 return;
30883
30884 /* Get the first entry from the request Q */
30885 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30886 h->Qdepth--;
30887
30888 /* Tell the controller to do our bidding */
30889 - h->access.submit_command(h, c);
30890 + h->access->submit_command(h, c);
30891
30892 /* Get onto the completion Q */
30893 addQ(&h->cmpQ, c);
30894 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30895 unsigned long flags;
30896 __u32 a,a1;
30897
30898 - istat = h->access.intr_pending(h);
30899 + istat = h->access->intr_pending(h);
30900 /* Is this interrupt for us? */
30901 if (istat == 0)
30902 return IRQ_NONE;
30903 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30904 */
30905 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30906 if (istat & FIFO_NOT_EMPTY) {
30907 - while((a = h->access.command_completed(h))) {
30908 + while((a = h->access->command_completed(h))) {
30909 a1 = a; a &= ~3;
30910 if ((c = h->cmpQ) == NULL)
30911 {
30912 @@ -1434,11 +1436,11 @@ static int sendcmd(
30913 /*
30914 * Disable interrupt
30915 */
30916 - info_p->access.set_intr_mask(info_p, 0);
30917 + info_p->access->set_intr_mask(info_p, 0);
30918 /* Make sure there is room in the command FIFO */
30919 /* Actually it should be completely empty at this time. */
30920 for (i = 200000; i > 0; i--) {
30921 - temp = info_p->access.fifo_full(info_p);
30922 + temp = info_p->access->fifo_full(info_p);
30923 if (temp != 0) {
30924 break;
30925 }
30926 @@ -1451,7 +1453,7 @@ DBG(
30927 /*
30928 * Send the cmd
30929 */
30930 - info_p->access.submit_command(info_p, c);
30931 + info_p->access->submit_command(info_p, c);
30932 complete = pollcomplete(ctlr);
30933
30934 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30935 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30936 * we check the new geometry. Then turn interrupts back on when
30937 * we're done.
30938 */
30939 - host->access.set_intr_mask(host, 0);
30940 + host->access->set_intr_mask(host, 0);
30941 getgeometry(ctlr);
30942 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30943 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30944
30945 for(i=0; i<NWD; i++) {
30946 struct gendisk *disk = ida_gendisk[ctlr][i];
30947 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30948 /* Wait (up to 2 seconds) for a command to complete */
30949
30950 for (i = 200000; i > 0; i--) {
30951 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30952 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30953 if (done == 0) {
30954 udelay(10); /* a short fixed delay */
30955 } else
30956 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30957 index be73e9d..7fbf140 100644
30958 --- a/drivers/block/cpqarray.h
30959 +++ b/drivers/block/cpqarray.h
30960 @@ -99,7 +99,7 @@ struct ctlr_info {
30961 drv_info_t drv[NWD];
30962 struct proc_dir_entry *proc;
30963
30964 - struct access_method access;
30965 + struct access_method *access;
30966
30967 cmdlist_t *reqQ;
30968 cmdlist_t *cmpQ;
30969 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30970 index 8ec2d70..2804b30 100644
30971 --- a/drivers/block/loop.c
30972 +++ b/drivers/block/loop.c
30973 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30974 mm_segment_t old_fs = get_fs();
30975
30976 set_fs(get_ds());
30977 - bw = file->f_op->write(file, buf, len, &pos);
30978 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30979 set_fs(old_fs);
30980 if (likely(bw == len))
30981 return 0;
30982 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30983 index 26ada47..083c480 100644
30984 --- a/drivers/block/nbd.c
30985 +++ b/drivers/block/nbd.c
30986 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30987 struct kvec iov;
30988 sigset_t blocked, oldset;
30989
30990 + pax_track_stack();
30991 +
30992 if (unlikely(!sock)) {
30993 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30994 lo->disk->disk_name, (send ? "send" : "recv"));
30995 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30996 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30997 unsigned int cmd, unsigned long arg)
30998 {
30999 + pax_track_stack();
31000 +
31001 switch (cmd) {
31002 case NBD_DISCONNECT: {
31003 struct request sreq;
31004 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
31005 index a5d585d..d087be3 100644
31006 --- a/drivers/block/pktcdvd.c
31007 +++ b/drivers/block/pktcdvd.c
31008 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
31009 return len;
31010 }
31011
31012 -static struct sysfs_ops kobj_pkt_ops = {
31013 +static const struct sysfs_ops kobj_pkt_ops = {
31014 .show = kobj_pkt_show,
31015 .store = kobj_pkt_store
31016 };
31017 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
31018 index 6aad99e..89cd142 100644
31019 --- a/drivers/char/Kconfig
31020 +++ b/drivers/char/Kconfig
31021 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
31022
31023 config DEVKMEM
31024 bool "/dev/kmem virtual device support"
31025 - default y
31026 + default n
31027 + depends on !GRKERNSEC_KMEM
31028 help
31029 Say Y here if you want to support the /dev/kmem device. The
31030 /dev/kmem device is rarely used, but can be used for certain
31031 @@ -1114,6 +1115,7 @@ config DEVPORT
31032 bool
31033 depends on !M68K
31034 depends on ISA || PCI
31035 + depends on !GRKERNSEC_KMEM
31036 default y
31037
31038 source "drivers/s390/char/Kconfig"
31039 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31040 index a96f319..a778a5b 100644
31041 --- a/drivers/char/agp/frontend.c
31042 +++ b/drivers/char/agp/frontend.c
31043 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31044 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31045 return -EFAULT;
31046
31047 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31048 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31049 return -EFAULT;
31050
31051 client = agp_find_client_by_pid(reserve.pid);
31052 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
31053 index d8cff90..9628e70 100644
31054 --- a/drivers/char/briq_panel.c
31055 +++ b/drivers/char/briq_panel.c
31056 @@ -10,6 +10,7 @@
31057 #include <linux/types.h>
31058 #include <linux/errno.h>
31059 #include <linux/tty.h>
31060 +#include <linux/mutex.h>
31061 #include <linux/timer.h>
31062 #include <linux/kernel.h>
31063 #include <linux/wait.h>
31064 @@ -36,6 +37,7 @@ static int vfd_is_open;
31065 static unsigned char vfd[40];
31066 static int vfd_cursor;
31067 static unsigned char ledpb, led;
31068 +static DEFINE_MUTEX(vfd_mutex);
31069
31070 static void update_vfd(void)
31071 {
31072 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31073 if (!vfd_is_open)
31074 return -EBUSY;
31075
31076 + mutex_lock(&vfd_mutex);
31077 for (;;) {
31078 char c;
31079 if (!indx)
31080 break;
31081 - if (get_user(c, buf))
31082 + if (get_user(c, buf)) {
31083 + mutex_unlock(&vfd_mutex);
31084 return -EFAULT;
31085 + }
31086 if (esc) {
31087 set_led(c);
31088 esc = 0;
31089 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31090 buf++;
31091 }
31092 update_vfd();
31093 + mutex_unlock(&vfd_mutex);
31094
31095 return len;
31096 }
31097 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31098 index 31e7c91..161afc0 100644
31099 --- a/drivers/char/genrtc.c
31100 +++ b/drivers/char/genrtc.c
31101 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31102 switch (cmd) {
31103
31104 case RTC_PLL_GET:
31105 + memset(&pll, 0, sizeof(pll));
31106 if (get_rtc_pll(&pll))
31107 return -EINVAL;
31108 else
31109 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31110 index 006466d..a2bb21c 100644
31111 --- a/drivers/char/hpet.c
31112 +++ b/drivers/char/hpet.c
31113 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31114 return 0;
31115 }
31116
31117 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31118 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31119
31120 static int
31121 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31122 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31123 }
31124
31125 static int
31126 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31127 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31128 {
31129 struct hpet_timer __iomem *timer;
31130 struct hpet __iomem *hpet;
31131 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31132 {
31133 struct hpet_info info;
31134
31135 + memset(&info, 0, sizeof(info));
31136 +
31137 if (devp->hd_ireqfreq)
31138 info.hi_ireqfreq =
31139 hpet_time_div(hpetp, devp->hd_ireqfreq);
31140 - else
31141 - info.hi_ireqfreq = 0;
31142 info.hi_flags =
31143 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31144 info.hi_hpet = hpetp->hp_which;
31145 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31146 index 0afc8b8..6913fc3 100644
31147 --- a/drivers/char/hvc_beat.c
31148 +++ b/drivers/char/hvc_beat.c
31149 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31150 return cnt;
31151 }
31152
31153 -static struct hv_ops hvc_beat_get_put_ops = {
31154 +static const struct hv_ops hvc_beat_get_put_ops = {
31155 .get_chars = hvc_beat_get_chars,
31156 .put_chars = hvc_beat_put_chars,
31157 };
31158 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31159 index 98097f2..407dddc 100644
31160 --- a/drivers/char/hvc_console.c
31161 +++ b/drivers/char/hvc_console.c
31162 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31163 * console interfaces but can still be used as a tty device. This has to be
31164 * static because kmalloc will not work during early console init.
31165 */
31166 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31167 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31168 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31169 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31170
31171 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31172 * vty adapters do NOT get an hvc_instantiate() callback since they
31173 * appear after early console init.
31174 */
31175 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31176 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31177 {
31178 struct hvc_struct *hp;
31179
31180 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31181 };
31182
31183 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31184 - struct hv_ops *ops, int outbuf_size)
31185 + const struct hv_ops *ops, int outbuf_size)
31186 {
31187 struct hvc_struct *hp;
31188 int i;
31189 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31190 index 10950ca..ed176c3 100644
31191 --- a/drivers/char/hvc_console.h
31192 +++ b/drivers/char/hvc_console.h
31193 @@ -55,7 +55,7 @@ struct hvc_struct {
31194 int outbuf_size;
31195 int n_outbuf;
31196 uint32_t vtermno;
31197 - struct hv_ops *ops;
31198 + const struct hv_ops *ops;
31199 int irq_requested;
31200 int data;
31201 struct winsize ws;
31202 @@ -76,11 +76,11 @@ struct hv_ops {
31203 };
31204
31205 /* Register a vterm and a slot index for use as a console (console_init) */
31206 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31207 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31208
31209 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31210 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31211 - struct hv_ops *ops, int outbuf_size);
31212 + const struct hv_ops *ops, int outbuf_size);
31213 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31214 extern int hvc_remove(struct hvc_struct *hp);
31215
31216 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31217 index 936d05b..fd02426 100644
31218 --- a/drivers/char/hvc_iseries.c
31219 +++ b/drivers/char/hvc_iseries.c
31220 @@ -197,7 +197,7 @@ done:
31221 return sent;
31222 }
31223
31224 -static struct hv_ops hvc_get_put_ops = {
31225 +static const struct hv_ops hvc_get_put_ops = {
31226 .get_chars = get_chars,
31227 .put_chars = put_chars,
31228 .notifier_add = notifier_add_irq,
31229 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31230 index b0e168f..69cda2a 100644
31231 --- a/drivers/char/hvc_iucv.c
31232 +++ b/drivers/char/hvc_iucv.c
31233 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31234
31235
31236 /* HVC operations */
31237 -static struct hv_ops hvc_iucv_ops = {
31238 +static const struct hv_ops hvc_iucv_ops = {
31239 .get_chars = hvc_iucv_get_chars,
31240 .put_chars = hvc_iucv_put_chars,
31241 .notifier_add = hvc_iucv_notifier_add,
31242 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31243 index 88590d0..61c4a61 100644
31244 --- a/drivers/char/hvc_rtas.c
31245 +++ b/drivers/char/hvc_rtas.c
31246 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31247 return i;
31248 }
31249
31250 -static struct hv_ops hvc_rtas_get_put_ops = {
31251 +static const struct hv_ops hvc_rtas_get_put_ops = {
31252 .get_chars = hvc_rtas_read_console,
31253 .put_chars = hvc_rtas_write_console,
31254 };
31255 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31256 index bd63ba8..b0957e6 100644
31257 --- a/drivers/char/hvc_udbg.c
31258 +++ b/drivers/char/hvc_udbg.c
31259 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31260 return i;
31261 }
31262
31263 -static struct hv_ops hvc_udbg_ops = {
31264 +static const struct hv_ops hvc_udbg_ops = {
31265 .get_chars = hvc_udbg_get,
31266 .put_chars = hvc_udbg_put,
31267 };
31268 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31269 index 10be343..27370e9 100644
31270 --- a/drivers/char/hvc_vio.c
31271 +++ b/drivers/char/hvc_vio.c
31272 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31273 return got;
31274 }
31275
31276 -static struct hv_ops hvc_get_put_ops = {
31277 +static const struct hv_ops hvc_get_put_ops = {
31278 .get_chars = filtered_get_chars,
31279 .put_chars = hvc_put_chars,
31280 .notifier_add = notifier_add_irq,
31281 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31282 index a6ee32b..94f8c26 100644
31283 --- a/drivers/char/hvc_xen.c
31284 +++ b/drivers/char/hvc_xen.c
31285 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31286 return recv;
31287 }
31288
31289 -static struct hv_ops hvc_ops = {
31290 +static const struct hv_ops hvc_ops = {
31291 .get_chars = read_console,
31292 .put_chars = write_console,
31293 .notifier_add = notifier_add_irq,
31294 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31295 index 266b858..f3ee0bb 100644
31296 --- a/drivers/char/hvcs.c
31297 +++ b/drivers/char/hvcs.c
31298 @@ -82,6 +82,7 @@
31299 #include <asm/hvcserver.h>
31300 #include <asm/uaccess.h>
31301 #include <asm/vio.h>
31302 +#include <asm/local.h>
31303
31304 /*
31305 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31306 @@ -269,7 +270,7 @@ struct hvcs_struct {
31307 unsigned int index;
31308
31309 struct tty_struct *tty;
31310 - int open_count;
31311 + local_t open_count;
31312
31313 /*
31314 * Used to tell the driver kernel_thread what operations need to take
31315 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31316
31317 spin_lock_irqsave(&hvcsd->lock, flags);
31318
31319 - if (hvcsd->open_count > 0) {
31320 + if (local_read(&hvcsd->open_count) > 0) {
31321 spin_unlock_irqrestore(&hvcsd->lock, flags);
31322 printk(KERN_INFO "HVCS: vterm state unchanged. "
31323 "The hvcs device node is still in use.\n");
31324 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31325 if ((retval = hvcs_partner_connect(hvcsd)))
31326 goto error_release;
31327
31328 - hvcsd->open_count = 1;
31329 + local_set(&hvcsd->open_count, 1);
31330 hvcsd->tty = tty;
31331 tty->driver_data = hvcsd;
31332
31333 @@ -1169,7 +1170,7 @@ fast_open:
31334
31335 spin_lock_irqsave(&hvcsd->lock, flags);
31336 kref_get(&hvcsd->kref);
31337 - hvcsd->open_count++;
31338 + local_inc(&hvcsd->open_count);
31339 hvcsd->todo_mask |= HVCS_SCHED_READ;
31340 spin_unlock_irqrestore(&hvcsd->lock, flags);
31341
31342 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31343 hvcsd = tty->driver_data;
31344
31345 spin_lock_irqsave(&hvcsd->lock, flags);
31346 - if (--hvcsd->open_count == 0) {
31347 + if (local_dec_and_test(&hvcsd->open_count)) {
31348
31349 vio_disable_interrupts(hvcsd->vdev);
31350
31351 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31352 free_irq(irq, hvcsd);
31353 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31354 return;
31355 - } else if (hvcsd->open_count < 0) {
31356 + } else if (local_read(&hvcsd->open_count) < 0) {
31357 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31358 " is missmanaged.\n",
31359 - hvcsd->vdev->unit_address, hvcsd->open_count);
31360 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31361 }
31362
31363 spin_unlock_irqrestore(&hvcsd->lock, flags);
31364 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31365
31366 spin_lock_irqsave(&hvcsd->lock, flags);
31367 /* Preserve this so that we know how many kref refs to put */
31368 - temp_open_count = hvcsd->open_count;
31369 + temp_open_count = local_read(&hvcsd->open_count);
31370
31371 /*
31372 * Don't kref put inside the spinlock because the destruction
31373 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31374 hvcsd->tty->driver_data = NULL;
31375 hvcsd->tty = NULL;
31376
31377 - hvcsd->open_count = 0;
31378 + local_set(&hvcsd->open_count, 0);
31379
31380 /* This will drop any buffered data on the floor which is OK in a hangup
31381 * scenario. */
31382 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31383 * the middle of a write operation? This is a crummy place to do this
31384 * but we want to keep it all in the spinlock.
31385 */
31386 - if (hvcsd->open_count <= 0) {
31387 + if (local_read(&hvcsd->open_count) <= 0) {
31388 spin_unlock_irqrestore(&hvcsd->lock, flags);
31389 return -ENODEV;
31390 }
31391 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31392 {
31393 struct hvcs_struct *hvcsd = tty->driver_data;
31394
31395 - if (!hvcsd || hvcsd->open_count <= 0)
31396 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31397 return 0;
31398
31399 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31400 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31401 index ec5e3f8..02455ba 100644
31402 --- a/drivers/char/ipmi/ipmi_msghandler.c
31403 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31404 @@ -414,7 +414,7 @@ struct ipmi_smi {
31405 struct proc_dir_entry *proc_dir;
31406 char proc_dir_name[10];
31407
31408 - atomic_t stats[IPMI_NUM_STATS];
31409 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31410
31411 /*
31412 * run_to_completion duplicate of smb_info, smi_info
31413 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31414
31415
31416 #define ipmi_inc_stat(intf, stat) \
31417 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31418 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31419 #define ipmi_get_stat(intf, stat) \
31420 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31421 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31422
31423 static int is_lan_addr(struct ipmi_addr *addr)
31424 {
31425 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31426 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31427 init_waitqueue_head(&intf->waitq);
31428 for (i = 0; i < IPMI_NUM_STATS; i++)
31429 - atomic_set(&intf->stats[i], 0);
31430 + atomic_set_unchecked(&intf->stats[i], 0);
31431
31432 intf->proc_dir = NULL;
31433
31434 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31435 struct ipmi_smi_msg smi_msg;
31436 struct ipmi_recv_msg recv_msg;
31437
31438 + pax_track_stack();
31439 +
31440 si = (struct ipmi_system_interface_addr *) &addr;
31441 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31442 si->channel = IPMI_BMC_CHANNEL;
31443 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31444 index abae8c9..8021979 100644
31445 --- a/drivers/char/ipmi/ipmi_si_intf.c
31446 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31447 @@ -277,7 +277,7 @@ struct smi_info {
31448 unsigned char slave_addr;
31449
31450 /* Counters and things for the proc filesystem. */
31451 - atomic_t stats[SI_NUM_STATS];
31452 + atomic_unchecked_t stats[SI_NUM_STATS];
31453
31454 struct task_struct *thread;
31455
31456 @@ -285,9 +285,9 @@ struct smi_info {
31457 };
31458
31459 #define smi_inc_stat(smi, stat) \
31460 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31461 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31462 #define smi_get_stat(smi, stat) \
31463 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31464 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31465
31466 #define SI_MAX_PARMS 4
31467
31468 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31469 atomic_set(&new_smi->req_events, 0);
31470 new_smi->run_to_completion = 0;
31471 for (i = 0; i < SI_NUM_STATS; i++)
31472 - atomic_set(&new_smi->stats[i], 0);
31473 + atomic_set_unchecked(&new_smi->stats[i], 0);
31474
31475 new_smi->interrupt_disabled = 0;
31476 atomic_set(&new_smi->stop_operation, 0);
31477 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31478 index 402838f..55e2200 100644
31479 --- a/drivers/char/istallion.c
31480 +++ b/drivers/char/istallion.c
31481 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31482 * re-used for each stats call.
31483 */
31484 static comstats_t stli_comstats;
31485 -static combrd_t stli_brdstats;
31486 static struct asystats stli_cdkstats;
31487
31488 /*****************************************************************************/
31489 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31490 {
31491 struct stlibrd *brdp;
31492 unsigned int i;
31493 + combrd_t stli_brdstats;
31494
31495 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31496 return -EFAULT;
31497 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31498 struct stliport stli_dummyport;
31499 struct stliport *portp;
31500
31501 + pax_track_stack();
31502 +
31503 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31504 return -EFAULT;
31505 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31506 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31507 struct stlibrd stli_dummybrd;
31508 struct stlibrd *brdp;
31509
31510 + pax_track_stack();
31511 +
31512 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31513 return -EFAULT;
31514 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31515 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31516 index 950837c..e55a288 100644
31517 --- a/drivers/char/keyboard.c
31518 +++ b/drivers/char/keyboard.c
31519 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31520 kbd->kbdmode == VC_MEDIUMRAW) &&
31521 value != KVAL(K_SAK))
31522 return; /* SAK is allowed even in raw mode */
31523 +
31524 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31525 + {
31526 + void *func = fn_handler[value];
31527 + if (func == fn_show_state || func == fn_show_ptregs ||
31528 + func == fn_show_mem)
31529 + return;
31530 + }
31531 +#endif
31532 +
31533 fn_handler[value](vc);
31534 }
31535
31536 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31537 .evbit = { BIT_MASK(EV_SND) },
31538 },
31539
31540 - { }, /* Terminating entry */
31541 + { 0 }, /* Terminating entry */
31542 };
31543
31544 MODULE_DEVICE_TABLE(input, kbd_ids);
31545 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31546 index 87c67b4..230527a 100644
31547 --- a/drivers/char/mbcs.c
31548 +++ b/drivers/char/mbcs.c
31549 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31550 return 0;
31551 }
31552
31553 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31554 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31555 {
31556 .part_num = MBCS_PART_NUM,
31557 .mfg_num = MBCS_MFG_NUM,
31558 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31559 index 1270f64..8495f49 100644
31560 --- a/drivers/char/mem.c
31561 +++ b/drivers/char/mem.c
31562 @@ -18,6 +18,7 @@
31563 #include <linux/raw.h>
31564 #include <linux/tty.h>
31565 #include <linux/capability.h>
31566 +#include <linux/security.h>
31567 #include <linux/ptrace.h>
31568 #include <linux/device.h>
31569 #include <linux/highmem.h>
31570 @@ -35,6 +36,10 @@
31571 # include <linux/efi.h>
31572 #endif
31573
31574 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31575 +extern struct file_operations grsec_fops;
31576 +#endif
31577 +
31578 static inline unsigned long size_inside_page(unsigned long start,
31579 unsigned long size)
31580 {
31581 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31582
31583 while (cursor < to) {
31584 if (!devmem_is_allowed(pfn)) {
31585 +#ifdef CONFIG_GRKERNSEC_KMEM
31586 + gr_handle_mem_readwrite(from, to);
31587 +#else
31588 printk(KERN_INFO
31589 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31590 current->comm, from, to);
31591 +#endif
31592 return 0;
31593 }
31594 cursor += PAGE_SIZE;
31595 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31596 }
31597 return 1;
31598 }
31599 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31600 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31601 +{
31602 + return 0;
31603 +}
31604 #else
31605 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31606 {
31607 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31608 #endif
31609
31610 while (count > 0) {
31611 + char *temp;
31612 +
31613 /*
31614 * Handle first page in case it's not aligned
31615 */
31616 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31617 if (!ptr)
31618 return -EFAULT;
31619
31620 - if (copy_to_user(buf, ptr, sz)) {
31621 +#ifdef CONFIG_PAX_USERCOPY
31622 + temp = kmalloc(sz, GFP_KERNEL);
31623 + if (!temp) {
31624 + unxlate_dev_mem_ptr(p, ptr);
31625 + return -ENOMEM;
31626 + }
31627 + memcpy(temp, ptr, sz);
31628 +#else
31629 + temp = ptr;
31630 +#endif
31631 +
31632 + if (copy_to_user(buf, temp, sz)) {
31633 +
31634 +#ifdef CONFIG_PAX_USERCOPY
31635 + kfree(temp);
31636 +#endif
31637 +
31638 unxlate_dev_mem_ptr(p, ptr);
31639 return -EFAULT;
31640 }
31641
31642 +#ifdef CONFIG_PAX_USERCOPY
31643 + kfree(temp);
31644 +#endif
31645 +
31646 unxlate_dev_mem_ptr(p, ptr);
31647
31648 buf += sz;
31649 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31650 size_t count, loff_t *ppos)
31651 {
31652 unsigned long p = *ppos;
31653 - ssize_t low_count, read, sz;
31654 + ssize_t low_count, read, sz, err = 0;
31655 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31656 - int err = 0;
31657
31658 read = 0;
31659 if (p < (unsigned long) high_memory) {
31660 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31661 }
31662 #endif
31663 while (low_count > 0) {
31664 + char *temp;
31665 +
31666 sz = size_inside_page(p, low_count);
31667
31668 /*
31669 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31670 */
31671 kbuf = xlate_dev_kmem_ptr((char *)p);
31672
31673 - if (copy_to_user(buf, kbuf, sz))
31674 +#ifdef CONFIG_PAX_USERCOPY
31675 + temp = kmalloc(sz, GFP_KERNEL);
31676 + if (!temp)
31677 + return -ENOMEM;
31678 + memcpy(temp, kbuf, sz);
31679 +#else
31680 + temp = kbuf;
31681 +#endif
31682 +
31683 + err = copy_to_user(buf, temp, sz);
31684 +
31685 +#ifdef CONFIG_PAX_USERCOPY
31686 + kfree(temp);
31687 +#endif
31688 +
31689 + if (err)
31690 return -EFAULT;
31691 buf += sz;
31692 p += sz;
31693 @@ -889,6 +941,9 @@ static const struct memdev {
31694 #ifdef CONFIG_CRASH_DUMP
31695 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31696 #endif
31697 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31698 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31699 +#endif
31700 };
31701
31702 static int memory_open(struct inode *inode, struct file *filp)
31703 diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
31704 index 918711a..4ffaf5e 100644
31705 --- a/drivers/char/mmtimer.c
31706 +++ b/drivers/char/mmtimer.c
31707 @@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
31708 return err;
31709 }
31710
31711 -static struct k_clock sgi_clock = {
31712 +static k_clock_no_const sgi_clock = {
31713 .res = 0,
31714 .clock_set = sgi_clock_set,
31715 .clock_get = sgi_clock_get,
31716 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31717 index 674b3ab..a8d1970 100644
31718 --- a/drivers/char/pcmcia/ipwireless/tty.c
31719 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31720 @@ -29,6 +29,7 @@
31721 #include <linux/tty_driver.h>
31722 #include <linux/tty_flip.h>
31723 #include <linux/uaccess.h>
31724 +#include <asm/local.h>
31725
31726 #include "tty.h"
31727 #include "network.h"
31728 @@ -51,7 +52,7 @@ struct ipw_tty {
31729 int tty_type;
31730 struct ipw_network *network;
31731 struct tty_struct *linux_tty;
31732 - int open_count;
31733 + local_t open_count;
31734 unsigned int control_lines;
31735 struct mutex ipw_tty_mutex;
31736 int tx_bytes_queued;
31737 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31738 mutex_unlock(&tty->ipw_tty_mutex);
31739 return -ENODEV;
31740 }
31741 - if (tty->open_count == 0)
31742 + if (local_read(&tty->open_count) == 0)
31743 tty->tx_bytes_queued = 0;
31744
31745 - tty->open_count++;
31746 + local_inc(&tty->open_count);
31747
31748 tty->linux_tty = linux_tty;
31749 linux_tty->driver_data = tty;
31750 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31751
31752 static void do_ipw_close(struct ipw_tty *tty)
31753 {
31754 - tty->open_count--;
31755 -
31756 - if (tty->open_count == 0) {
31757 + if (local_dec_return(&tty->open_count) == 0) {
31758 struct tty_struct *linux_tty = tty->linux_tty;
31759
31760 if (linux_tty != NULL) {
31761 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31762 return;
31763
31764 mutex_lock(&tty->ipw_tty_mutex);
31765 - if (tty->open_count == 0) {
31766 + if (local_read(&tty->open_count) == 0) {
31767 mutex_unlock(&tty->ipw_tty_mutex);
31768 return;
31769 }
31770 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31771 return;
31772 }
31773
31774 - if (!tty->open_count) {
31775 + if (!local_read(&tty->open_count)) {
31776 mutex_unlock(&tty->ipw_tty_mutex);
31777 return;
31778 }
31779 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31780 return -ENODEV;
31781
31782 mutex_lock(&tty->ipw_tty_mutex);
31783 - if (!tty->open_count) {
31784 + if (!local_read(&tty->open_count)) {
31785 mutex_unlock(&tty->ipw_tty_mutex);
31786 return -EINVAL;
31787 }
31788 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31789 if (!tty)
31790 return -ENODEV;
31791
31792 - if (!tty->open_count)
31793 + if (!local_read(&tty->open_count))
31794 return -EINVAL;
31795
31796 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31797 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31798 if (!tty)
31799 return 0;
31800
31801 - if (!tty->open_count)
31802 + if (!local_read(&tty->open_count))
31803 return 0;
31804
31805 return tty->tx_bytes_queued;
31806 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31807 if (!tty)
31808 return -ENODEV;
31809
31810 - if (!tty->open_count)
31811 + if (!local_read(&tty->open_count))
31812 return -EINVAL;
31813
31814 return get_control_lines(tty);
31815 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31816 if (!tty)
31817 return -ENODEV;
31818
31819 - if (!tty->open_count)
31820 + if (!local_read(&tty->open_count))
31821 return -EINVAL;
31822
31823 return set_control_lines(tty, set, clear);
31824 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31825 if (!tty)
31826 return -ENODEV;
31827
31828 - if (!tty->open_count)
31829 + if (!local_read(&tty->open_count))
31830 return -EINVAL;
31831
31832 /* FIXME: Exactly how is the tty object locked here .. */
31833 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31834 against a parallel ioctl etc */
31835 mutex_lock(&ttyj->ipw_tty_mutex);
31836 }
31837 - while (ttyj->open_count)
31838 + while (local_read(&ttyj->open_count))
31839 do_ipw_close(ttyj);
31840 ipwireless_disassociate_network_ttys(network,
31841 ttyj->channel_idx);
31842 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31843 index 62f282e..e45c45c 100644
31844 --- a/drivers/char/pty.c
31845 +++ b/drivers/char/pty.c
31846 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31847 register_sysctl_table(pty_root_table);
31848
31849 /* Now create the /dev/ptmx special device */
31850 + pax_open_kernel();
31851 tty_default_fops(&ptmx_fops);
31852 - ptmx_fops.open = ptmx_open;
31853 + *(void **)&ptmx_fops.open = ptmx_open;
31854 + pax_close_kernel();
31855
31856 cdev_init(&ptmx_cdev, &ptmx_fops);
31857 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31858 diff --git a/drivers/char/random.c b/drivers/char/random.c
31859 index 3a19e2d..6ed09d3 100644
31860 --- a/drivers/char/random.c
31861 +++ b/drivers/char/random.c
31862 @@ -254,8 +254,13 @@
31863 /*
31864 * Configuration information
31865 */
31866 +#ifdef CONFIG_GRKERNSEC_RANDNET
31867 +#define INPUT_POOL_WORDS 512
31868 +#define OUTPUT_POOL_WORDS 128
31869 +#else
31870 #define INPUT_POOL_WORDS 128
31871 #define OUTPUT_POOL_WORDS 32
31872 +#endif
31873 #define SEC_XFER_SIZE 512
31874
31875 /*
31876 @@ -292,10 +297,17 @@ static struct poolinfo {
31877 int poolwords;
31878 int tap1, tap2, tap3, tap4, tap5;
31879 } poolinfo_table[] = {
31880 +#ifdef CONFIG_GRKERNSEC_RANDNET
31881 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31882 + { 512, 411, 308, 208, 104, 1 },
31883 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31884 + { 128, 103, 76, 51, 25, 1 },
31885 +#else
31886 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31887 { 128, 103, 76, 51, 25, 1 },
31888 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31889 { 32, 26, 20, 14, 7, 1 },
31890 +#endif
31891 #if 0
31892 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31893 { 2048, 1638, 1231, 819, 411, 1 },
31894 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31895 #include <linux/sysctl.h>
31896
31897 static int min_read_thresh = 8, min_write_thresh;
31898 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31899 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31900 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31901 static char sysctl_bootid[16];
31902
31903 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31904 index 0e29a23..0efc2c2 100644
31905 --- a/drivers/char/rocket.c
31906 +++ b/drivers/char/rocket.c
31907 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31908 struct rocket_ports tmp;
31909 int board;
31910
31911 + pax_track_stack();
31912 +
31913 if (!retports)
31914 return -EFAULT;
31915 memset(&tmp, 0, sizeof (tmp));
31916 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31917 index 8c262aa..4d3b058 100644
31918 --- a/drivers/char/sonypi.c
31919 +++ b/drivers/char/sonypi.c
31920 @@ -55,6 +55,7 @@
31921 #include <asm/uaccess.h>
31922 #include <asm/io.h>
31923 #include <asm/system.h>
31924 +#include <asm/local.h>
31925
31926 #include <linux/sonypi.h>
31927
31928 @@ -491,7 +492,7 @@ static struct sonypi_device {
31929 spinlock_t fifo_lock;
31930 wait_queue_head_t fifo_proc_list;
31931 struct fasync_struct *fifo_async;
31932 - int open_count;
31933 + local_t open_count;
31934 int model;
31935 struct input_dev *input_jog_dev;
31936 struct input_dev *input_key_dev;
31937 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31938 static int sonypi_misc_release(struct inode *inode, struct file *file)
31939 {
31940 mutex_lock(&sonypi_device.lock);
31941 - sonypi_device.open_count--;
31942 + local_dec(&sonypi_device.open_count);
31943 mutex_unlock(&sonypi_device.lock);
31944 return 0;
31945 }
31946 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31947 lock_kernel();
31948 mutex_lock(&sonypi_device.lock);
31949 /* Flush input queue on first open */
31950 - if (!sonypi_device.open_count)
31951 + if (!local_read(&sonypi_device.open_count))
31952 kfifo_reset(sonypi_device.fifo);
31953 - sonypi_device.open_count++;
31954 + local_inc(&sonypi_device.open_count);
31955 mutex_unlock(&sonypi_device.lock);
31956 unlock_kernel();
31957 return 0;
31958 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31959 index db6dcfa..13834cb 100644
31960 --- a/drivers/char/stallion.c
31961 +++ b/drivers/char/stallion.c
31962 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31963 struct stlport stl_dummyport;
31964 struct stlport *portp;
31965
31966 + pax_track_stack();
31967 +
31968 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31969 return -EFAULT;
31970 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31971 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31972 index a0789f6..cea3902 100644
31973 --- a/drivers/char/tpm/tpm.c
31974 +++ b/drivers/char/tpm/tpm.c
31975 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31976 chip->vendor.req_complete_val)
31977 goto out_recv;
31978
31979 - if ((status == chip->vendor.req_canceled)) {
31980 + if (status == chip->vendor.req_canceled) {
31981 dev_err(chip->dev, "Operation Canceled\n");
31982 rc = -ECANCELED;
31983 goto out;
31984 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31985
31986 struct tpm_chip *chip = dev_get_drvdata(dev);
31987
31988 + pax_track_stack();
31989 +
31990 tpm_cmd.header.in = tpm_readpubek_header;
31991 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31992 "attempting to read the PUBEK");
31993 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31994 index bf2170f..ce8cab9 100644
31995 --- a/drivers/char/tpm/tpm_bios.c
31996 +++ b/drivers/char/tpm/tpm_bios.c
31997 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31998 event = addr;
31999
32000 if ((event->event_type == 0 && event->event_size == 0) ||
32001 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
32002 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
32003 return NULL;
32004
32005 return addr;
32006 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
32007 return NULL;
32008
32009 if ((event->event_type == 0 && event->event_size == 0) ||
32010 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
32011 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
32012 return NULL;
32013
32014 (*pos)++;
32015 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
32016 int i;
32017
32018 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
32019 - seq_putc(m, data[i]);
32020 + if (!seq_putc(m, data[i]))
32021 + return -EFAULT;
32022
32023 return 0;
32024 }
32025 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
32026 log->bios_event_log_end = log->bios_event_log + len;
32027
32028 virt = acpi_os_map_memory(start, len);
32029 + if (!virt) {
32030 + kfree(log->bios_event_log);
32031 + log->bios_event_log = NULL;
32032 + return -EFAULT;
32033 + }
32034
32035 - memcpy(log->bios_event_log, virt, len);
32036 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
32037
32038 acpi_os_unmap_memory(virt, len);
32039 return 0;
32040 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
32041 index 123cedf..6664cb4 100644
32042 --- a/drivers/char/tty_io.c
32043 +++ b/drivers/char/tty_io.c
32044 @@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
32045 static int tty_release(struct inode *, struct file *);
32046 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
32047 #ifdef CONFIG_COMPAT
32048 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32049 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32050 unsigned long arg);
32051 #else
32052 #define tty_compat_ioctl NULL
32053 @@ -1774,6 +1774,7 @@ got_driver:
32054
32055 if (IS_ERR(tty)) {
32056 mutex_unlock(&tty_mutex);
32057 + tty_driver_kref_put(driver);
32058 return PTR_ERR(tty);
32059 }
32060 }
32061 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32062 return retval;
32063 }
32064
32065 +EXPORT_SYMBOL(tty_ioctl);
32066 +
32067 #ifdef CONFIG_COMPAT
32068 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32069 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32070 unsigned long arg)
32071 {
32072 struct inode *inode = file->f_dentry->d_inode;
32073 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32074
32075 return retval;
32076 }
32077 +
32078 +EXPORT_SYMBOL(tty_compat_ioctl);
32079 #endif
32080
32081 /*
32082 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32083
32084 void tty_default_fops(struct file_operations *fops)
32085 {
32086 - *fops = tty_fops;
32087 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32088 }
32089
32090 /*
32091 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32092 index d814a3d..b55b9c9 100644
32093 --- a/drivers/char/tty_ldisc.c
32094 +++ b/drivers/char/tty_ldisc.c
32095 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32096 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32097 struct tty_ldisc_ops *ldo = ld->ops;
32098
32099 - ldo->refcount--;
32100 + atomic_dec(&ldo->refcount);
32101 module_put(ldo->owner);
32102 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32103
32104 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32105 spin_lock_irqsave(&tty_ldisc_lock, flags);
32106 tty_ldiscs[disc] = new_ldisc;
32107 new_ldisc->num = disc;
32108 - new_ldisc->refcount = 0;
32109 + atomic_set(&new_ldisc->refcount, 0);
32110 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32111
32112 return ret;
32113 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32114 return -EINVAL;
32115
32116 spin_lock_irqsave(&tty_ldisc_lock, flags);
32117 - if (tty_ldiscs[disc]->refcount)
32118 + if (atomic_read(&tty_ldiscs[disc]->refcount))
32119 ret = -EBUSY;
32120 else
32121 tty_ldiscs[disc] = NULL;
32122 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32123 if (ldops) {
32124 ret = ERR_PTR(-EAGAIN);
32125 if (try_module_get(ldops->owner)) {
32126 - ldops->refcount++;
32127 + atomic_inc(&ldops->refcount);
32128 ret = ldops;
32129 }
32130 }
32131 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32132 unsigned long flags;
32133
32134 spin_lock_irqsave(&tty_ldisc_lock, flags);
32135 - ldops->refcount--;
32136 + atomic_dec(&ldops->refcount);
32137 module_put(ldops->owner);
32138 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32139 }
32140 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32141 index a035ae3..c27fe2c 100644
32142 --- a/drivers/char/virtio_console.c
32143 +++ b/drivers/char/virtio_console.c
32144 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32145 * virtqueue, so we let the drivers do some boutique early-output thing. */
32146 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32147 {
32148 - virtio_cons.put_chars = put_chars;
32149 + pax_open_kernel();
32150 + *(void **)&virtio_cons.put_chars = put_chars;
32151 + pax_close_kernel();
32152 return hvc_instantiate(0, 0, &virtio_cons);
32153 }
32154
32155 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32156 out_vq = vqs[1];
32157
32158 /* Start using the new console output. */
32159 - virtio_cons.get_chars = get_chars;
32160 - virtio_cons.put_chars = put_chars;
32161 - virtio_cons.notifier_add = notifier_add_vio;
32162 - virtio_cons.notifier_del = notifier_del_vio;
32163 - virtio_cons.notifier_hangup = notifier_del_vio;
32164 + pax_open_kernel();
32165 + *(void **)&virtio_cons.get_chars = get_chars;
32166 + *(void **)&virtio_cons.put_chars = put_chars;
32167 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32168 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32169 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32170 + pax_close_kernel();
32171
32172 /* The first argument of hvc_alloc() is the virtual console number, so
32173 * we use zero. The second argument is the parameter for the
32174 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32175 index 0c80c68..53d59c1 100644
32176 --- a/drivers/char/vt.c
32177 +++ b/drivers/char/vt.c
32178 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32179
32180 static void notify_write(struct vc_data *vc, unsigned int unicode)
32181 {
32182 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32183 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32184 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32185 }
32186
32187 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32188 index 6351a26..999af95 100644
32189 --- a/drivers/char/vt_ioctl.c
32190 +++ b/drivers/char/vt_ioctl.c
32191 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32192 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32193 return -EFAULT;
32194
32195 - if (!capable(CAP_SYS_TTY_CONFIG))
32196 - perm = 0;
32197 -
32198 switch (cmd) {
32199 case KDGKBENT:
32200 key_map = key_maps[s];
32201 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32202 val = (i ? K_HOLE : K_NOSUCHMAP);
32203 return put_user(val, &user_kbe->kb_value);
32204 case KDSKBENT:
32205 + if (!capable(CAP_SYS_TTY_CONFIG))
32206 + perm = 0;
32207 +
32208 if (!perm)
32209 return -EPERM;
32210 +
32211 if (!i && v == K_NOSUCHMAP) {
32212 /* deallocate map */
32213 key_map = key_maps[s];
32214 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32215 int i, j, k;
32216 int ret;
32217
32218 - if (!capable(CAP_SYS_TTY_CONFIG))
32219 - perm = 0;
32220 -
32221 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32222 if (!kbs) {
32223 ret = -ENOMEM;
32224 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32225 kfree(kbs);
32226 return ((p && *p) ? -EOVERFLOW : 0);
32227 case KDSKBSENT:
32228 + if (!capable(CAP_SYS_TTY_CONFIG))
32229 + perm = 0;
32230 +
32231 if (!perm) {
32232 ret = -EPERM;
32233 goto reterr;
32234 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32235 index c7ae026..1769c1d 100644
32236 --- a/drivers/cpufreq/cpufreq.c
32237 +++ b/drivers/cpufreq/cpufreq.c
32238 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32239 complete(&policy->kobj_unregister);
32240 }
32241
32242 -static struct sysfs_ops sysfs_ops = {
32243 +static const struct sysfs_ops sysfs_ops = {
32244 .show = show,
32245 .store = store,
32246 };
32247 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32248 index 97b0038..2056670 100644
32249 --- a/drivers/cpuidle/sysfs.c
32250 +++ b/drivers/cpuidle/sysfs.c
32251 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32252 return ret;
32253 }
32254
32255 -static struct sysfs_ops cpuidle_sysfs_ops = {
32256 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32257 .show = cpuidle_show,
32258 .store = cpuidle_store,
32259 };
32260 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32261 return ret;
32262 }
32263
32264 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32265 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32266 .show = cpuidle_state_show,
32267 };
32268
32269 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32270 .release = cpuidle_state_sysfs_release,
32271 };
32272
32273 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32274 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32275 {
32276 kobject_put(&device->kobjs[i]->kobj);
32277 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32278 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32279 index 5f753fc..0377ae9 100644
32280 --- a/drivers/crypto/hifn_795x.c
32281 +++ b/drivers/crypto/hifn_795x.c
32282 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32283 0xCA, 0x34, 0x2B, 0x2E};
32284 struct scatterlist sg;
32285
32286 + pax_track_stack();
32287 +
32288 memset(src, 0, sizeof(src));
32289 memset(ctx.key, 0, sizeof(ctx.key));
32290
32291 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32292 index 71e6482..de8d96c 100644
32293 --- a/drivers/crypto/padlock-aes.c
32294 +++ b/drivers/crypto/padlock-aes.c
32295 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32296 struct crypto_aes_ctx gen_aes;
32297 int cpu;
32298
32299 + pax_track_stack();
32300 +
32301 if (key_len % 8) {
32302 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32303 return -EINVAL;
32304 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32305 index dcc4ab7..cc834bb 100644
32306 --- a/drivers/dma/ioat/dma.c
32307 +++ b/drivers/dma/ioat/dma.c
32308 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32309 return entry->show(&chan->common, page);
32310 }
32311
32312 -struct sysfs_ops ioat_sysfs_ops = {
32313 +const struct sysfs_ops ioat_sysfs_ops = {
32314 .show = ioat_attr_show,
32315 };
32316
32317 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32318 index bbc3e78..f2db62c 100644
32319 --- a/drivers/dma/ioat/dma.h
32320 +++ b/drivers/dma/ioat/dma.h
32321 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32322 unsigned long *phys_complete);
32323 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32324 void ioat_kobject_del(struct ioatdma_device *device);
32325 -extern struct sysfs_ops ioat_sysfs_ops;
32326 +extern const struct sysfs_ops ioat_sysfs_ops;
32327 extern struct ioat_sysfs_entry ioat_version_attr;
32328 extern struct ioat_sysfs_entry ioat_cap_attr;
32329 #endif /* IOATDMA_H */
32330 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32331 index 9908c9e..3ceb0e5 100644
32332 --- a/drivers/dma/ioat/dma_v3.c
32333 +++ b/drivers/dma/ioat/dma_v3.c
32334 @@ -71,10 +71,10 @@
32335 /* provide a lookup table for setting the source address in the base or
32336 * extended descriptor of an xor or pq descriptor
32337 */
32338 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32339 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32340 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32341 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32342 +static const u8 xor_idx_to_desc = 0xd0;
32343 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32344 +static const u8 pq_idx_to_desc = 0xf8;
32345 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32346
32347 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32348 {
32349 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32350 index 85c464a..afd1e73 100644
32351 --- a/drivers/edac/amd64_edac.c
32352 +++ b/drivers/edac/amd64_edac.c
32353 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32354 * PCI core identifies what devices are on a system during boot, and then
32355 * inquiry this table to see if this driver is for a given device found.
32356 */
32357 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32358 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32359 {
32360 .vendor = PCI_VENDOR_ID_AMD,
32361 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32362 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32363 index 2b95f1a..4f52793 100644
32364 --- a/drivers/edac/amd76x_edac.c
32365 +++ b/drivers/edac/amd76x_edac.c
32366 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32367 edac_mc_free(mci);
32368 }
32369
32370 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32371 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32372 {
32373 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32374 AMD762},
32375 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32376 index d205d49..74c9672 100644
32377 --- a/drivers/edac/e752x_edac.c
32378 +++ b/drivers/edac/e752x_edac.c
32379 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32380 edac_mc_free(mci);
32381 }
32382
32383 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32384 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32385 {
32386 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32387 E7520},
32388 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32389 index c7d11cc..c59c1ca 100644
32390 --- a/drivers/edac/e7xxx_edac.c
32391 +++ b/drivers/edac/e7xxx_edac.c
32392 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32393 edac_mc_free(mci);
32394 }
32395
32396 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32397 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32398 {
32399 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32400 E7205},
32401 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32402 index 5376457..5fdedbc 100644
32403 --- a/drivers/edac/edac_device_sysfs.c
32404 +++ b/drivers/edac/edac_device_sysfs.c
32405 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32406 }
32407
32408 /* edac_dev file operations for an 'ctl_info' */
32409 -static struct sysfs_ops device_ctl_info_ops = {
32410 +static const struct sysfs_ops device_ctl_info_ops = {
32411 .show = edac_dev_ctl_info_show,
32412 .store = edac_dev_ctl_info_store
32413 };
32414 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32415 }
32416
32417 /* edac_dev file operations for an 'instance' */
32418 -static struct sysfs_ops device_instance_ops = {
32419 +static const struct sysfs_ops device_instance_ops = {
32420 .show = edac_dev_instance_show,
32421 .store = edac_dev_instance_store
32422 };
32423 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32424 }
32425
32426 /* edac_dev file operations for a 'block' */
32427 -static struct sysfs_ops device_block_ops = {
32428 +static const struct sysfs_ops device_block_ops = {
32429 .show = edac_dev_block_show,
32430 .store = edac_dev_block_store
32431 };
32432 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32433 index e1d4ce0..88840e9 100644
32434 --- a/drivers/edac/edac_mc_sysfs.c
32435 +++ b/drivers/edac/edac_mc_sysfs.c
32436 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32437 return -EIO;
32438 }
32439
32440 -static struct sysfs_ops csrowfs_ops = {
32441 +static const struct sysfs_ops csrowfs_ops = {
32442 .show = csrowdev_show,
32443 .store = csrowdev_store
32444 };
32445 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32446 }
32447
32448 /* Intermediate show/store table */
32449 -static struct sysfs_ops mci_ops = {
32450 +static const struct sysfs_ops mci_ops = {
32451 .show = mcidev_show,
32452 .store = mcidev_store
32453 };
32454 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32455 index 422728c..d8d9c88 100644
32456 --- a/drivers/edac/edac_pci_sysfs.c
32457 +++ b/drivers/edac/edac_pci_sysfs.c
32458 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32459 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32460 static int edac_pci_poll_msec = 1000; /* one second workq period */
32461
32462 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32463 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32464 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32465 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32466
32467 static struct kobject *edac_pci_top_main_kobj;
32468 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32469 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32470 }
32471
32472 /* fs_ops table */
32473 -static struct sysfs_ops pci_instance_ops = {
32474 +static const struct sysfs_ops pci_instance_ops = {
32475 .show = edac_pci_instance_show,
32476 .store = edac_pci_instance_store
32477 };
32478 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32479 return -EIO;
32480 }
32481
32482 -static struct sysfs_ops edac_pci_sysfs_ops = {
32483 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32484 .show = edac_pci_dev_show,
32485 .store = edac_pci_dev_store
32486 };
32487 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32488 edac_printk(KERN_CRIT, EDAC_PCI,
32489 "Signaled System Error on %s\n",
32490 pci_name(dev));
32491 - atomic_inc(&pci_nonparity_count);
32492 + atomic_inc_unchecked(&pci_nonparity_count);
32493 }
32494
32495 if (status & (PCI_STATUS_PARITY)) {
32496 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32497 "Master Data Parity Error on %s\n",
32498 pci_name(dev));
32499
32500 - atomic_inc(&pci_parity_count);
32501 + atomic_inc_unchecked(&pci_parity_count);
32502 }
32503
32504 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32505 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32506 "Detected Parity Error on %s\n",
32507 pci_name(dev));
32508
32509 - atomic_inc(&pci_parity_count);
32510 + atomic_inc_unchecked(&pci_parity_count);
32511 }
32512 }
32513
32514 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32515 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32516 "Signaled System Error on %s\n",
32517 pci_name(dev));
32518 - atomic_inc(&pci_nonparity_count);
32519 + atomic_inc_unchecked(&pci_nonparity_count);
32520 }
32521
32522 if (status & (PCI_STATUS_PARITY)) {
32523 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32524 "Master Data Parity Error on "
32525 "%s\n", pci_name(dev));
32526
32527 - atomic_inc(&pci_parity_count);
32528 + atomic_inc_unchecked(&pci_parity_count);
32529 }
32530
32531 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32532 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32533 "Detected Parity Error on %s\n",
32534 pci_name(dev));
32535
32536 - atomic_inc(&pci_parity_count);
32537 + atomic_inc_unchecked(&pci_parity_count);
32538 }
32539 }
32540 }
32541 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32542 if (!check_pci_errors)
32543 return;
32544
32545 - before_count = atomic_read(&pci_parity_count);
32546 + before_count = atomic_read_unchecked(&pci_parity_count);
32547
32548 /* scan all PCI devices looking for a Parity Error on devices and
32549 * bridges.
32550 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32551 /* Only if operator has selected panic on PCI Error */
32552 if (edac_pci_get_panic_on_pe()) {
32553 /* If the count is different 'after' from 'before' */
32554 - if (before_count != atomic_read(&pci_parity_count))
32555 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32556 panic("EDAC: PCI Parity Error");
32557 }
32558 }
32559 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32560 index 6c9a0f2..9c1cf7e 100644
32561 --- a/drivers/edac/i3000_edac.c
32562 +++ b/drivers/edac/i3000_edac.c
32563 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32564 edac_mc_free(mci);
32565 }
32566
32567 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32568 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32569 {
32570 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32571 I3000},
32572 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32573 index fde4db9..fe108f9 100644
32574 --- a/drivers/edac/i3200_edac.c
32575 +++ b/drivers/edac/i3200_edac.c
32576 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32577 edac_mc_free(mci);
32578 }
32579
32580 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32581 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32582 {
32583 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32584 I3200},
32585 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32586 index adc10a2..57d4ccf 100644
32587 --- a/drivers/edac/i5000_edac.c
32588 +++ b/drivers/edac/i5000_edac.c
32589 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32590 *
32591 * The "E500P" device is the first device supported.
32592 */
32593 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32594 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32595 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32596 .driver_data = I5000P},
32597
32598 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32599 index 22db05a..b2b5503 100644
32600 --- a/drivers/edac/i5100_edac.c
32601 +++ b/drivers/edac/i5100_edac.c
32602 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32603 edac_mc_free(mci);
32604 }
32605
32606 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32607 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32608 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32609 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32610 { 0, }
32611 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32612 index f99d106..f050710 100644
32613 --- a/drivers/edac/i5400_edac.c
32614 +++ b/drivers/edac/i5400_edac.c
32615 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32616 *
32617 * The "E500P" device is the first device supported.
32618 */
32619 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32620 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32621 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32622 {0,} /* 0 terminated list. */
32623 };
32624 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32625 index 577760a..9ce16ce 100644
32626 --- a/drivers/edac/i82443bxgx_edac.c
32627 +++ b/drivers/edac/i82443bxgx_edac.c
32628 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32629
32630 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32631
32632 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32633 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32634 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32635 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32636 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32637 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32638 index c0088ba..64a7b98 100644
32639 --- a/drivers/edac/i82860_edac.c
32640 +++ b/drivers/edac/i82860_edac.c
32641 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32642 edac_mc_free(mci);
32643 }
32644
32645 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32646 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32647 {
32648 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32649 I82860},
32650 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32651 index b2d83b9..a34357b 100644
32652 --- a/drivers/edac/i82875p_edac.c
32653 +++ b/drivers/edac/i82875p_edac.c
32654 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32655 edac_mc_free(mci);
32656 }
32657
32658 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32659 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32660 {
32661 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32662 I82875P},
32663 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32664 index 2eed3ea..87bbbd1 100644
32665 --- a/drivers/edac/i82975x_edac.c
32666 +++ b/drivers/edac/i82975x_edac.c
32667 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32668 edac_mc_free(mci);
32669 }
32670
32671 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32672 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32673 {
32674 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32675 I82975X
32676 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32677 index 9900675..78ac2b6 100644
32678 --- a/drivers/edac/r82600_edac.c
32679 +++ b/drivers/edac/r82600_edac.c
32680 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32681 edac_mc_free(mci);
32682 }
32683
32684 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32685 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32686 {
32687 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32688 },
32689 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32690 index d4ec605..4cfec4e 100644
32691 --- a/drivers/edac/x38_edac.c
32692 +++ b/drivers/edac/x38_edac.c
32693 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32694 edac_mc_free(mci);
32695 }
32696
32697 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32698 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32699 {
32700 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32701 X38},
32702 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32703 index 3fc2ceb..daf098f 100644
32704 --- a/drivers/firewire/core-card.c
32705 +++ b/drivers/firewire/core-card.c
32706 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32707
32708 void fw_core_remove_card(struct fw_card *card)
32709 {
32710 - struct fw_card_driver dummy_driver = dummy_driver_template;
32711 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32712
32713 card->driver->update_phy_reg(card, 4,
32714 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32715 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32716 index 4560d8f..36db24a 100644
32717 --- a/drivers/firewire/core-cdev.c
32718 +++ b/drivers/firewire/core-cdev.c
32719 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32720 int ret;
32721
32722 if ((request->channels == 0 && request->bandwidth == 0) ||
32723 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32724 - request->bandwidth < 0)
32725 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32726 return -EINVAL;
32727
32728 r = kmalloc(sizeof(*r), GFP_KERNEL);
32729 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32730 index da628c7..cf54a2c 100644
32731 --- a/drivers/firewire/core-transaction.c
32732 +++ b/drivers/firewire/core-transaction.c
32733 @@ -36,6 +36,7 @@
32734 #include <linux/string.h>
32735 #include <linux/timer.h>
32736 #include <linux/types.h>
32737 +#include <linux/sched.h>
32738
32739 #include <asm/byteorder.h>
32740
32741 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32742 struct transaction_callback_data d;
32743 struct fw_transaction t;
32744
32745 + pax_track_stack();
32746 +
32747 init_completion(&d.done);
32748 d.payload = payload;
32749 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32750 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32751 index 7ff6e75..a2965d9 100644
32752 --- a/drivers/firewire/core.h
32753 +++ b/drivers/firewire/core.h
32754 @@ -86,6 +86,7 @@ struct fw_card_driver {
32755
32756 int (*stop_iso)(struct fw_iso_context *ctx);
32757 };
32758 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32759
32760 void fw_card_initialize(struct fw_card *card,
32761 const struct fw_card_driver *driver, struct device *device);
32762 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32763 index 3a2ccb0..82fd7c4 100644
32764 --- a/drivers/firmware/dmi_scan.c
32765 +++ b/drivers/firmware/dmi_scan.c
32766 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32767 }
32768 }
32769 else {
32770 - /*
32771 - * no iounmap() for that ioremap(); it would be a no-op, but
32772 - * it's so early in setup that sucker gets confused into doing
32773 - * what it shouldn't if we actually call it.
32774 - */
32775 p = dmi_ioremap(0xF0000, 0x10000);
32776 if (p == NULL)
32777 goto error;
32778 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32779 if (buf == NULL)
32780 return -1;
32781
32782 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32783 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32784
32785 iounmap(buf);
32786 return 0;
32787 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32788 index 9e4f59d..110e24e 100644
32789 --- a/drivers/firmware/edd.c
32790 +++ b/drivers/firmware/edd.c
32791 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32792 return ret;
32793 }
32794
32795 -static struct sysfs_ops edd_attr_ops = {
32796 +static const struct sysfs_ops edd_attr_ops = {
32797 .show = edd_attr_show,
32798 };
32799
32800 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32801 index f4f709d..082f06e 100644
32802 --- a/drivers/firmware/efivars.c
32803 +++ b/drivers/firmware/efivars.c
32804 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32805 return ret;
32806 }
32807
32808 -static struct sysfs_ops efivar_attr_ops = {
32809 +static const struct sysfs_ops efivar_attr_ops = {
32810 .show = efivar_attr_show,
32811 .store = efivar_attr_store,
32812 };
32813 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32814 index 051d1eb..0a5d4e7 100644
32815 --- a/drivers/firmware/iscsi_ibft.c
32816 +++ b/drivers/firmware/iscsi_ibft.c
32817 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32818 return ret;
32819 }
32820
32821 -static struct sysfs_ops ibft_attr_ops = {
32822 +static const struct sysfs_ops ibft_attr_ops = {
32823 .show = ibft_show_attribute,
32824 };
32825
32826 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32827 index 56f9234..8c58c7b 100644
32828 --- a/drivers/firmware/memmap.c
32829 +++ b/drivers/firmware/memmap.c
32830 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32831 NULL
32832 };
32833
32834 -static struct sysfs_ops memmap_attr_ops = {
32835 +static const struct sysfs_ops memmap_attr_ops = {
32836 .show = memmap_attr_show,
32837 };
32838
32839 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32840 index b16c9a8..2af7d3f 100644
32841 --- a/drivers/gpio/vr41xx_giu.c
32842 +++ b/drivers/gpio/vr41xx_giu.c
32843 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32844 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32845 maskl, pendl, maskh, pendh);
32846
32847 - atomic_inc(&irq_err_count);
32848 + atomic_inc_unchecked(&irq_err_count);
32849
32850 return -EINVAL;
32851 }
32852 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32853 index bea6efc..3dc0f42 100644
32854 --- a/drivers/gpu/drm/drm_crtc.c
32855 +++ b/drivers/gpu/drm/drm_crtc.c
32856 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32857 */
32858 if ((out_resp->count_modes >= mode_count) && mode_count) {
32859 copied = 0;
32860 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32861 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32862 list_for_each_entry(mode, &connector->modes, head) {
32863 drm_crtc_convert_to_umode(&u_mode, mode);
32864 if (copy_to_user(mode_ptr + copied,
32865 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32866
32867 if ((out_resp->count_props >= props_count) && props_count) {
32868 copied = 0;
32869 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32870 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32871 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32872 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32873 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32874 if (connector->property_ids[i] != 0) {
32875 if (put_user(connector->property_ids[i],
32876 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32877
32878 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32879 copied = 0;
32880 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32881 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32882 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32883 if (connector->encoder_ids[i] != 0) {
32884 if (put_user(connector->encoder_ids[i],
32885 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32886 }
32887
32888 for (i = 0; i < crtc_req->count_connectors; i++) {
32889 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32890 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32891 if (get_user(out_id, &set_connectors_ptr[i])) {
32892 ret = -EFAULT;
32893 goto out;
32894 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32895 out_resp->flags = property->flags;
32896
32897 if ((out_resp->count_values >= value_count) && value_count) {
32898 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32899 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32900 for (i = 0; i < value_count; i++) {
32901 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32902 ret = -EFAULT;
32903 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32904 if (property->flags & DRM_MODE_PROP_ENUM) {
32905 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32906 copied = 0;
32907 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32908 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32909 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32910
32911 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32912 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32913 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32914 copied = 0;
32915 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32916 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32917 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32918
32919 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32920 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32921 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32922 blob = obj_to_blob(obj);
32923
32924 if (out_resp->length == blob->length) {
32925 - blob_ptr = (void *)(unsigned long)out_resp->data;
32926 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32927 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32928 ret = -EFAULT;
32929 goto done;
32930 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32931 index 1b8745d..92fdbf6 100644
32932 --- a/drivers/gpu/drm/drm_crtc_helper.c
32933 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32934 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32935 struct drm_crtc *tmp;
32936 int crtc_mask = 1;
32937
32938 - WARN(!crtc, "checking null crtc?");
32939 + BUG_ON(!crtc);
32940
32941 dev = crtc->dev;
32942
32943 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32944
32945 adjusted_mode = drm_mode_duplicate(dev, mode);
32946
32947 + pax_track_stack();
32948 +
32949 crtc->enabled = drm_helper_crtc_in_use(crtc);
32950
32951 if (!crtc->enabled)
32952 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32953 index 0e27d98..dec8768 100644
32954 --- a/drivers/gpu/drm/drm_drv.c
32955 +++ b/drivers/gpu/drm/drm_drv.c
32956 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32957 char *kdata = NULL;
32958
32959 atomic_inc(&dev->ioctl_count);
32960 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32961 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32962 ++file_priv->ioctl_count;
32963
32964 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32965 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32966 index 519161e..98c840c 100644
32967 --- a/drivers/gpu/drm/drm_fops.c
32968 +++ b/drivers/gpu/drm/drm_fops.c
32969 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32970 }
32971
32972 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32973 - atomic_set(&dev->counts[i], 0);
32974 + atomic_set_unchecked(&dev->counts[i], 0);
32975
32976 dev->sigdata.lock = NULL;
32977
32978 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32979
32980 retcode = drm_open_helper(inode, filp, dev);
32981 if (!retcode) {
32982 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32983 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32984 spin_lock(&dev->count_lock);
32985 - if (!dev->open_count++) {
32986 + if (local_inc_return(&dev->open_count) == 1) {
32987 spin_unlock(&dev->count_lock);
32988 retcode = drm_setup(dev);
32989 goto out;
32990 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32991
32992 lock_kernel();
32993
32994 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32995 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32996
32997 if (dev->driver->preclose)
32998 dev->driver->preclose(dev, file_priv);
32999 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
33000 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
33001 task_pid_nr(current),
33002 (long)old_encode_dev(file_priv->minor->device),
33003 - dev->open_count);
33004 + local_read(&dev->open_count));
33005
33006 /* Release any auth tokens that might point to this file_priv,
33007 (do that under the drm_global_mutex) */
33008 @@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
33009 * End inline drm_release
33010 */
33011
33012 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
33013 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
33014 spin_lock(&dev->count_lock);
33015 - if (!--dev->open_count) {
33016 + if (local_dec_and_test(&dev->open_count)) {
33017 if (atomic_read(&dev->ioctl_count)) {
33018 DRM_ERROR("Device busy: %d\n",
33019 atomic_read(&dev->ioctl_count));
33020 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
33021 index 8bf3770..79422805 100644
33022 --- a/drivers/gpu/drm/drm_gem.c
33023 +++ b/drivers/gpu/drm/drm_gem.c
33024 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
33025 spin_lock_init(&dev->object_name_lock);
33026 idr_init(&dev->object_name_idr);
33027 atomic_set(&dev->object_count, 0);
33028 - atomic_set(&dev->object_memory, 0);
33029 + atomic_set_unchecked(&dev->object_memory, 0);
33030 atomic_set(&dev->pin_count, 0);
33031 - atomic_set(&dev->pin_memory, 0);
33032 + atomic_set_unchecked(&dev->pin_memory, 0);
33033 atomic_set(&dev->gtt_count, 0);
33034 - atomic_set(&dev->gtt_memory, 0);
33035 + atomic_set_unchecked(&dev->gtt_memory, 0);
33036
33037 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
33038 if (!mm) {
33039 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
33040 goto fput;
33041 }
33042 atomic_inc(&dev->object_count);
33043 - atomic_add(obj->size, &dev->object_memory);
33044 + atomic_add_unchecked(obj->size, &dev->object_memory);
33045 return obj;
33046 fput:
33047 fput(obj->filp);
33048 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
33049
33050 fput(obj->filp);
33051 atomic_dec(&dev->object_count);
33052 - atomic_sub(obj->size, &dev->object_memory);
33053 + atomic_sub_unchecked(obj->size, &dev->object_memory);
33054 kfree(obj);
33055 }
33056 EXPORT_SYMBOL(drm_gem_object_free);
33057 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33058 index f0f6c6b..34af322 100644
33059 --- a/drivers/gpu/drm/drm_info.c
33060 +++ b/drivers/gpu/drm/drm_info.c
33061 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33062 struct drm_local_map *map;
33063 struct drm_map_list *r_list;
33064
33065 - /* Hardcoded from _DRM_FRAME_BUFFER,
33066 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33067 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33068 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33069 + static const char * const types[] = {
33070 + [_DRM_FRAME_BUFFER] = "FB",
33071 + [_DRM_REGISTERS] = "REG",
33072 + [_DRM_SHM] = "SHM",
33073 + [_DRM_AGP] = "AGP",
33074 + [_DRM_SCATTER_GATHER] = "SG",
33075 + [_DRM_CONSISTENT] = "PCI",
33076 + [_DRM_GEM] = "GEM" };
33077 const char *type;
33078 int i;
33079
33080 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33081 map = r_list->map;
33082 if (!map)
33083 continue;
33084 - if (map->type < 0 || map->type > 5)
33085 + if (map->type >= ARRAY_SIZE(types))
33086 type = "??";
33087 else
33088 type = types[map->type];
33089 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33090 struct drm_device *dev = node->minor->dev;
33091
33092 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33093 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33094 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33095 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33096 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33097 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33098 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33099 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33100 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33101 return 0;
33102 }
33103 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33104 mutex_lock(&dev->struct_mutex);
33105 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33106 atomic_read(&dev->vma_count),
33107 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33108 + NULL, 0);
33109 +#else
33110 high_memory, (u64)virt_to_phys(high_memory));
33111 +#endif
33112
33113 list_for_each_entry(pt, &dev->vmalist, head) {
33114 vma = pt->vma;
33115 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33116 continue;
33117 seq_printf(m,
33118 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33119 - pt->pid, vma->vm_start, vma->vm_end,
33120 + pt->pid,
33121 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33122 + 0, 0,
33123 +#else
33124 + vma->vm_start, vma->vm_end,
33125 +#endif
33126 vma->vm_flags & VM_READ ? 'r' : '-',
33127 vma->vm_flags & VM_WRITE ? 'w' : '-',
33128 vma->vm_flags & VM_EXEC ? 'x' : '-',
33129 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33130 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33131 vma->vm_flags & VM_IO ? 'i' : '-',
33132 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33133 + 0);
33134 +#else
33135 vma->vm_pgoff);
33136 +#endif
33137
33138 #if defined(__i386__)
33139 pgprot = pgprot_val(vma->vm_page_prot);
33140 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33141 index 282d9fd..71e5f11 100644
33142 --- a/drivers/gpu/drm/drm_ioc32.c
33143 +++ b/drivers/gpu/drm/drm_ioc32.c
33144 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33145 request = compat_alloc_user_space(nbytes);
33146 if (!access_ok(VERIFY_WRITE, request, nbytes))
33147 return -EFAULT;
33148 - list = (struct drm_buf_desc *) (request + 1);
33149 + list = (struct drm_buf_desc __user *) (request + 1);
33150
33151 if (__put_user(count, &request->count)
33152 || __put_user(list, &request->list))
33153 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33154 request = compat_alloc_user_space(nbytes);
33155 if (!access_ok(VERIFY_WRITE, request, nbytes))
33156 return -EFAULT;
33157 - list = (struct drm_buf_pub *) (request + 1);
33158 + list = (struct drm_buf_pub __user *) (request + 1);
33159
33160 if (__put_user(count, &request->count)
33161 || __put_user(list, &request->list))
33162 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33163 index 9b9ff46..4ea724c 100644
33164 --- a/drivers/gpu/drm/drm_ioctl.c
33165 +++ b/drivers/gpu/drm/drm_ioctl.c
33166 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33167 stats->data[i].value =
33168 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33169 else
33170 - stats->data[i].value = atomic_read(&dev->counts[i]);
33171 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33172 stats->data[i].type = dev->types[i];
33173 }
33174
33175 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33176 index e2f70a5..c703e86 100644
33177 --- a/drivers/gpu/drm/drm_lock.c
33178 +++ b/drivers/gpu/drm/drm_lock.c
33179 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33180 if (drm_lock_take(&master->lock, lock->context)) {
33181 master->lock.file_priv = file_priv;
33182 master->lock.lock_time = jiffies;
33183 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33184 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33185 break; /* Got lock */
33186 }
33187
33188 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33189 return -EINVAL;
33190 }
33191
33192 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33193 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33194
33195 /* kernel_context_switch isn't used by any of the x86 drm
33196 * modules but is required by the Sparc driver.
33197 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33198 index 7d1d88c..b9131b2 100644
33199 --- a/drivers/gpu/drm/i810/i810_dma.c
33200 +++ b/drivers/gpu/drm/i810/i810_dma.c
33201 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33202 dma->buflist[vertex->idx],
33203 vertex->discard, vertex->used);
33204
33205 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33206 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33207 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33208 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33209 sarea_priv->last_enqueue = dev_priv->counter - 1;
33210 sarea_priv->last_dispatch = (int)hw_status[5];
33211
33212 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33213 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33214 mc->last_render);
33215
33216 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33217 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33218 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33219 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33220 sarea_priv->last_enqueue = dev_priv->counter - 1;
33221 sarea_priv->last_dispatch = (int)hw_status[5];
33222
33223 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33224 index 21e2691..7321edd 100644
33225 --- a/drivers/gpu/drm/i810/i810_drv.h
33226 +++ b/drivers/gpu/drm/i810/i810_drv.h
33227 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33228 int page_flipping;
33229
33230 wait_queue_head_t irq_queue;
33231 - atomic_t irq_received;
33232 - atomic_t irq_emitted;
33233 + atomic_unchecked_t irq_received;
33234 + atomic_unchecked_t irq_emitted;
33235
33236 int front_offset;
33237 } drm_i810_private_t;
33238 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33239 index da82afe..48a45de 100644
33240 --- a/drivers/gpu/drm/i830/i830_drv.h
33241 +++ b/drivers/gpu/drm/i830/i830_drv.h
33242 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33243 int page_flipping;
33244
33245 wait_queue_head_t irq_queue;
33246 - atomic_t irq_received;
33247 - atomic_t irq_emitted;
33248 + atomic_unchecked_t irq_received;
33249 + atomic_unchecked_t irq_emitted;
33250
33251 int use_mi_batchbuffer_start;
33252
33253 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33254 index 91ec2bb..6f21fab 100644
33255 --- a/drivers/gpu/drm/i830/i830_irq.c
33256 +++ b/drivers/gpu/drm/i830/i830_irq.c
33257 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33258
33259 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33260
33261 - atomic_inc(&dev_priv->irq_received);
33262 + atomic_inc_unchecked(&dev_priv->irq_received);
33263 wake_up_interruptible(&dev_priv->irq_queue);
33264
33265 return IRQ_HANDLED;
33266 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33267
33268 DRM_DEBUG("%s\n", __func__);
33269
33270 - atomic_inc(&dev_priv->irq_emitted);
33271 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33272
33273 BEGIN_LP_RING(2);
33274 OUT_RING(0);
33275 OUT_RING(GFX_OP_USER_INTERRUPT);
33276 ADVANCE_LP_RING();
33277
33278 - return atomic_read(&dev_priv->irq_emitted);
33279 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33280 }
33281
33282 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33283 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33284
33285 DRM_DEBUG("%s\n", __func__);
33286
33287 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33288 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33289 return 0;
33290
33291 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33292 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33293
33294 for (;;) {
33295 __set_current_state(TASK_INTERRUPTIBLE);
33296 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33297 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33298 break;
33299 if ((signed)(end - jiffies) <= 0) {
33300 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33301 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33302 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33303 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33304 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33305 - atomic_set(&dev_priv->irq_received, 0);
33306 - atomic_set(&dev_priv->irq_emitted, 0);
33307 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33308 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33309 init_waitqueue_head(&dev_priv->irq_queue);
33310 }
33311
33312 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33313 index 288fc50..c6092055 100644
33314 --- a/drivers/gpu/drm/i915/dvo.h
33315 +++ b/drivers/gpu/drm/i915/dvo.h
33316 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33317 *
33318 * \return singly-linked list of modes or NULL if no modes found.
33319 */
33320 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33321 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33322
33323 /**
33324 * Clean up driver-specific bits of the output
33325 */
33326 - void (*destroy) (struct intel_dvo_device *dvo);
33327 + void (* const destroy) (struct intel_dvo_device *dvo);
33328
33329 /**
33330 * Debugging hook to dump device registers to log file
33331 */
33332 - void (*dump_regs)(struct intel_dvo_device *dvo);
33333 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33334 };
33335
33336 -extern struct intel_dvo_dev_ops sil164_ops;
33337 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33338 -extern struct intel_dvo_dev_ops ivch_ops;
33339 -extern struct intel_dvo_dev_ops tfp410_ops;
33340 -extern struct intel_dvo_dev_ops ch7017_ops;
33341 +extern const struct intel_dvo_dev_ops sil164_ops;
33342 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33343 +extern const struct intel_dvo_dev_ops ivch_ops;
33344 +extern const struct intel_dvo_dev_ops tfp410_ops;
33345 +extern const struct intel_dvo_dev_ops ch7017_ops;
33346
33347 #endif /* _INTEL_DVO_H */
33348 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33349 index 621815b..499d82e 100644
33350 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33351 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33352 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33353 }
33354 }
33355
33356 -struct intel_dvo_dev_ops ch7017_ops = {
33357 +const struct intel_dvo_dev_ops ch7017_ops = {
33358 .init = ch7017_init,
33359 .detect = ch7017_detect,
33360 .mode_valid = ch7017_mode_valid,
33361 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33362 index a9b8962..ac769ba 100644
33363 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33364 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33365 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33366 }
33367 }
33368
33369 -struct intel_dvo_dev_ops ch7xxx_ops = {
33370 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33371 .init = ch7xxx_init,
33372 .detect = ch7xxx_detect,
33373 .mode_valid = ch7xxx_mode_valid,
33374 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33375 index aa176f9..ed2930c 100644
33376 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33377 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33378 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33379 }
33380 }
33381
33382 -struct intel_dvo_dev_ops ivch_ops= {
33383 +const struct intel_dvo_dev_ops ivch_ops= {
33384 .init = ivch_init,
33385 .dpms = ivch_dpms,
33386 .save = ivch_save,
33387 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33388 index e1c1f73..7dbebcf 100644
33389 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33390 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33391 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33392 }
33393 }
33394
33395 -struct intel_dvo_dev_ops sil164_ops = {
33396 +const struct intel_dvo_dev_ops sil164_ops = {
33397 .init = sil164_init,
33398 .detect = sil164_detect,
33399 .mode_valid = sil164_mode_valid,
33400 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33401 index 16dce84..7e1b6f8 100644
33402 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33403 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33404 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33405 }
33406 }
33407
33408 -struct intel_dvo_dev_ops tfp410_ops = {
33409 +const struct intel_dvo_dev_ops tfp410_ops = {
33410 .init = tfp410_init,
33411 .detect = tfp410_detect,
33412 .mode_valid = tfp410_mode_valid,
33413 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33414 index 7e859d6..7d1cf2b 100644
33415 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33416 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33417 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33418 I915_READ(GTIMR));
33419 }
33420 seq_printf(m, "Interrupts received: %d\n",
33421 - atomic_read(&dev_priv->irq_received));
33422 + atomic_read_unchecked(&dev_priv->irq_received));
33423 if (dev_priv->hw_status_page != NULL) {
33424 seq_printf(m, "Current sequence: %d\n",
33425 i915_get_gem_seqno(dev));
33426 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33427 index 5449239..7e4f68d 100644
33428 --- a/drivers/gpu/drm/i915/i915_drv.c
33429 +++ b/drivers/gpu/drm/i915/i915_drv.c
33430 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33431 return i915_resume(dev);
33432 }
33433
33434 -static struct vm_operations_struct i915_gem_vm_ops = {
33435 +static const struct vm_operations_struct i915_gem_vm_ops = {
33436 .fault = i915_gem_fault,
33437 .open = drm_gem_vm_open,
33438 .close = drm_gem_vm_close,
33439 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33440 index 97163f7..c24c7c7 100644
33441 --- a/drivers/gpu/drm/i915/i915_drv.h
33442 +++ b/drivers/gpu/drm/i915/i915_drv.h
33443 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33444 /* display clock increase/decrease */
33445 /* pll clock increase/decrease */
33446 /* clock gating init */
33447 -};
33448 +} __no_const;
33449
33450 typedef struct drm_i915_private {
33451 struct drm_device *dev;
33452 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33453 int page_flipping;
33454
33455 wait_queue_head_t irq_queue;
33456 - atomic_t irq_received;
33457 + atomic_unchecked_t irq_received;
33458 /** Protects user_irq_refcount and irq_mask_reg */
33459 spinlock_t user_irq_lock;
33460 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33461 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33462 index 27a3074..eb3f959 100644
33463 --- a/drivers/gpu/drm/i915/i915_gem.c
33464 +++ b/drivers/gpu/drm/i915/i915_gem.c
33465 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33466
33467 args->aper_size = dev->gtt_total;
33468 args->aper_available_size = (args->aper_size -
33469 - atomic_read(&dev->pin_memory));
33470 + atomic_read_unchecked(&dev->pin_memory));
33471
33472 return 0;
33473 }
33474 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33475
33476 if (obj_priv->gtt_space) {
33477 atomic_dec(&dev->gtt_count);
33478 - atomic_sub(obj->size, &dev->gtt_memory);
33479 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33480
33481 drm_mm_put_block(obj_priv->gtt_space);
33482 obj_priv->gtt_space = NULL;
33483 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33484 goto search_free;
33485 }
33486 atomic_inc(&dev->gtt_count);
33487 - atomic_add(obj->size, &dev->gtt_memory);
33488 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33489
33490 /* Assert that the object is not currently in any GPU domain. As it
33491 * wasn't in the GTT, there shouldn't be any way it could have been in
33492 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33493 "%d/%d gtt bytes\n",
33494 atomic_read(&dev->object_count),
33495 atomic_read(&dev->pin_count),
33496 - atomic_read(&dev->object_memory),
33497 - atomic_read(&dev->pin_memory),
33498 - atomic_read(&dev->gtt_memory),
33499 + atomic_read_unchecked(&dev->object_memory),
33500 + atomic_read_unchecked(&dev->pin_memory),
33501 + atomic_read_unchecked(&dev->gtt_memory),
33502 dev->gtt_total);
33503 }
33504 goto err;
33505 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33506 */
33507 if (obj_priv->pin_count == 1) {
33508 atomic_inc(&dev->pin_count);
33509 - atomic_add(obj->size, &dev->pin_memory);
33510 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33511 if (!obj_priv->active &&
33512 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33513 !list_empty(&obj_priv->list))
33514 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33515 list_move_tail(&obj_priv->list,
33516 &dev_priv->mm.inactive_list);
33517 atomic_dec(&dev->pin_count);
33518 - atomic_sub(obj->size, &dev->pin_memory);
33519 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33520 }
33521 i915_verify_inactive(dev, __FILE__, __LINE__);
33522 }
33523 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33524 index 63f28ad..f5469da 100644
33525 --- a/drivers/gpu/drm/i915/i915_irq.c
33526 +++ b/drivers/gpu/drm/i915/i915_irq.c
33527 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33528 int irq_received;
33529 int ret = IRQ_NONE;
33530
33531 - atomic_inc(&dev_priv->irq_received);
33532 + atomic_inc_unchecked(&dev_priv->irq_received);
33533
33534 if (IS_IGDNG(dev))
33535 return igdng_irq_handler(dev);
33536 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33537 {
33538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33539
33540 - atomic_set(&dev_priv->irq_received, 0);
33541 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33542
33543 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33544 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33545 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33546 index 5d9c6a7..d1b0e29 100644
33547 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33548 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33549 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33550 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33551
33552 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33553 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33554 + pax_open_kernel();
33555 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33556 + pax_close_kernel();
33557
33558 /* Read the regs to test if we can talk to the device */
33559 for (i = 0; i < 0x40; i++) {
33560 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33561 index be6c6b9..8615d9c 100644
33562 --- a/drivers/gpu/drm/mga/mga_drv.h
33563 +++ b/drivers/gpu/drm/mga/mga_drv.h
33564 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33565 u32 clear_cmd;
33566 u32 maccess;
33567
33568 - atomic_t vbl_received; /**< Number of vblanks received. */
33569 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33570 wait_queue_head_t fence_queue;
33571 - atomic_t last_fence_retired;
33572 + atomic_unchecked_t last_fence_retired;
33573 u32 next_fence_to_post;
33574
33575 unsigned int fb_cpp;
33576 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33577 index daa6041..a28a5da 100644
33578 --- a/drivers/gpu/drm/mga/mga_irq.c
33579 +++ b/drivers/gpu/drm/mga/mga_irq.c
33580 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33581 if (crtc != 0)
33582 return 0;
33583
33584 - return atomic_read(&dev_priv->vbl_received);
33585 + return atomic_read_unchecked(&dev_priv->vbl_received);
33586 }
33587
33588
33589 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33590 /* VBLANK interrupt */
33591 if (status & MGA_VLINEPEN) {
33592 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33593 - atomic_inc(&dev_priv->vbl_received);
33594 + atomic_inc_unchecked(&dev_priv->vbl_received);
33595 drm_handle_vblank(dev, 0);
33596 handled = 1;
33597 }
33598 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33599 MGA_WRITE(MGA_PRIMEND, prim_end);
33600 }
33601
33602 - atomic_inc(&dev_priv->last_fence_retired);
33603 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33604 DRM_WAKEUP(&dev_priv->fence_queue);
33605 handled = 1;
33606 }
33607 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33608 * using fences.
33609 */
33610 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33611 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33612 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33613 - *sequence) <= (1 << 23)));
33614
33615 *sequence = cur_fence;
33616 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33617 index 4c39a40..b22a9ea 100644
33618 --- a/drivers/gpu/drm/r128/r128_cce.c
33619 +++ b/drivers/gpu/drm/r128/r128_cce.c
33620 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33621
33622 /* GH: Simple idle check.
33623 */
33624 - atomic_set(&dev_priv->idle_count, 0);
33625 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33626
33627 /* We don't support anything other than bus-mastering ring mode,
33628 * but the ring can be in either AGP or PCI space for the ring
33629 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33630 index 3c60829..4faf484 100644
33631 --- a/drivers/gpu/drm/r128/r128_drv.h
33632 +++ b/drivers/gpu/drm/r128/r128_drv.h
33633 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33634 int is_pci;
33635 unsigned long cce_buffers_offset;
33636
33637 - atomic_t idle_count;
33638 + atomic_unchecked_t idle_count;
33639
33640 int page_flipping;
33641 int current_page;
33642 u32 crtc_offset;
33643 u32 crtc_offset_cntl;
33644
33645 - atomic_t vbl_received;
33646 + atomic_unchecked_t vbl_received;
33647
33648 u32 color_fmt;
33649 unsigned int front_offset;
33650 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33651 index 69810fb..97bf17a 100644
33652 --- a/drivers/gpu/drm/r128/r128_irq.c
33653 +++ b/drivers/gpu/drm/r128/r128_irq.c
33654 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33655 if (crtc != 0)
33656 return 0;
33657
33658 - return atomic_read(&dev_priv->vbl_received);
33659 + return atomic_read_unchecked(&dev_priv->vbl_received);
33660 }
33661
33662 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33663 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33664 /* VBLANK interrupt */
33665 if (status & R128_CRTC_VBLANK_INT) {
33666 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33667 - atomic_inc(&dev_priv->vbl_received);
33668 + atomic_inc_unchecked(&dev_priv->vbl_received);
33669 drm_handle_vblank(dev, 0);
33670 return IRQ_HANDLED;
33671 }
33672 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33673 index af2665c..51922d2 100644
33674 --- a/drivers/gpu/drm/r128/r128_state.c
33675 +++ b/drivers/gpu/drm/r128/r128_state.c
33676 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33677
33678 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33679 {
33680 - if (atomic_read(&dev_priv->idle_count) == 0) {
33681 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33682 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33683 } else {
33684 - atomic_set(&dev_priv->idle_count, 0);
33685 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33686 }
33687 }
33688
33689 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33690 index dd72b91..8644b3c 100644
33691 --- a/drivers/gpu/drm/radeon/atom.c
33692 +++ b/drivers/gpu/drm/radeon/atom.c
33693 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33694 char name[512];
33695 int i;
33696
33697 + pax_track_stack();
33698 +
33699 ctx->card = card;
33700 ctx->bios = bios;
33701
33702 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33703 index 0d79577..efaa7a5 100644
33704 --- a/drivers/gpu/drm/radeon/mkregtable.c
33705 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33706 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33707 regex_t mask_rex;
33708 regmatch_t match[4];
33709 char buf[1024];
33710 - size_t end;
33711 + long end;
33712 int len;
33713 int done = 0;
33714 int r;
33715 unsigned o;
33716 struct offset *offset;
33717 char last_reg_s[10];
33718 - int last_reg;
33719 + unsigned long last_reg;
33720
33721 if (regcomp
33722 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33723 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33724 index 6735213..38c2c67 100644
33725 --- a/drivers/gpu/drm/radeon/radeon.h
33726 +++ b/drivers/gpu/drm/radeon/radeon.h
33727 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33728 */
33729 struct radeon_fence_driver {
33730 uint32_t scratch_reg;
33731 - atomic_t seq;
33732 + atomic_unchecked_t seq;
33733 uint32_t last_seq;
33734 unsigned long count_timeout;
33735 wait_queue_head_t queue;
33736 @@ -640,7 +640,7 @@ struct radeon_asic {
33737 uint32_t offset, uint32_t obj_size);
33738 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33739 void (*bandwidth_update)(struct radeon_device *rdev);
33740 -};
33741 +} __no_const;
33742
33743 /*
33744 * Asic structures
33745 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33746 index 4e928b9..d8b6008 100644
33747 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33748 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33749 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33750 bool linkb;
33751 struct radeon_i2c_bus_rec ddc_bus;
33752
33753 + pax_track_stack();
33754 +
33755 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33756
33757 if (data_offset == 0)
33758 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33759 }
33760 }
33761
33762 -struct bios_connector {
33763 +static struct bios_connector {
33764 bool valid;
33765 uint16_t line_mux;
33766 uint16_t devices;
33767 int connector_type;
33768 struct radeon_i2c_bus_rec ddc_bus;
33769 -};
33770 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33771
33772 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33773 drm_device
33774 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33775 uint8_t dac;
33776 union atom_supported_devices *supported_devices;
33777 int i, j;
33778 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33779
33780 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33781
33782 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33783 index 083a181..ccccae0 100644
33784 --- a/drivers/gpu/drm/radeon/radeon_display.c
33785 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33786 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33787
33788 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33789 error = freq - current_freq;
33790 - error = error < 0 ? 0xffffffff : error;
33791 + error = (int32_t)error < 0 ? 0xffffffff : error;
33792 } else
33793 error = abs(current_freq - freq);
33794 vco_diff = abs(vco - best_vco);
33795 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33796 index 76e4070..193fa7f 100644
33797 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33798 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33799 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33800
33801 /* SW interrupt */
33802 wait_queue_head_t swi_queue;
33803 - atomic_t swi_emitted;
33804 + atomic_unchecked_t swi_emitted;
33805 int vblank_crtc;
33806 uint32_t irq_enable_reg;
33807 uint32_t r500_disp_irq_reg;
33808 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33809 index 3beb26d..6ce9c4a 100644
33810 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33811 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33812 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33813 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33814 return 0;
33815 }
33816 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33817 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33818 if (!rdev->cp.ready) {
33819 /* FIXME: cp is not running assume everythings is done right
33820 * away
33821 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33822 return r;
33823 }
33824 WREG32(rdev->fence_drv.scratch_reg, 0);
33825 - atomic_set(&rdev->fence_drv.seq, 0);
33826 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33827 INIT_LIST_HEAD(&rdev->fence_drv.created);
33828 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33829 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33830 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33831 index a1bf11d..4a123c0 100644
33832 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33833 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33834 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33835 request = compat_alloc_user_space(sizeof(*request));
33836 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33837 || __put_user(req32.param, &request->param)
33838 - || __put_user((void __user *)(unsigned long)req32.value,
33839 + || __put_user((unsigned long)req32.value,
33840 &request->value))
33841 return -EFAULT;
33842
33843 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33844 index b79ecc4..8dab92d 100644
33845 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33846 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33847 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33848 unsigned int ret;
33849 RING_LOCALS;
33850
33851 - atomic_inc(&dev_priv->swi_emitted);
33852 - ret = atomic_read(&dev_priv->swi_emitted);
33853 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33854 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33855
33856 BEGIN_RING(4);
33857 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33858 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33859 drm_radeon_private_t *dev_priv =
33860 (drm_radeon_private_t *) dev->dev_private;
33861
33862 - atomic_set(&dev_priv->swi_emitted, 0);
33863 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33864 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33865
33866 dev->max_vblank_count = 0x001fffff;
33867 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33868 index 4747910..48ca4b3 100644
33869 --- a/drivers/gpu/drm/radeon/radeon_state.c
33870 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33871 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33872 {
33873 drm_radeon_private_t *dev_priv = dev->dev_private;
33874 drm_radeon_getparam_t *param = data;
33875 - int value;
33876 + int value = 0;
33877
33878 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33879
33880 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33881 index 1381e06..0e53b17 100644
33882 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33883 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33884 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33885 DRM_INFO("radeon: ttm finalized\n");
33886 }
33887
33888 -static struct vm_operations_struct radeon_ttm_vm_ops;
33889 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33890 -
33891 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33892 -{
33893 - struct ttm_buffer_object *bo;
33894 - int r;
33895 -
33896 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33897 - if (bo == NULL) {
33898 - return VM_FAULT_NOPAGE;
33899 - }
33900 - r = ttm_vm_ops->fault(vma, vmf);
33901 - return r;
33902 -}
33903 -
33904 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33905 {
33906 struct drm_file *file_priv;
33907 struct radeon_device *rdev;
33908 - int r;
33909
33910 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33911 return drm_mmap(filp, vma);
33912 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33913
33914 file_priv = (struct drm_file *)filp->private_data;
33915 rdev = file_priv->minor->dev->dev_private;
33916 - if (rdev == NULL) {
33917 + if (!rdev)
33918 return -EINVAL;
33919 - }
33920 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33921 - if (unlikely(r != 0)) {
33922 - return r;
33923 - }
33924 - if (unlikely(ttm_vm_ops == NULL)) {
33925 - ttm_vm_ops = vma->vm_ops;
33926 - radeon_ttm_vm_ops = *ttm_vm_ops;
33927 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33928 - }
33929 - vma->vm_ops = &radeon_ttm_vm_ops;
33930 - return 0;
33931 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33932 }
33933
33934
33935 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33936 index b12ff76..0bd0c6e 100644
33937 --- a/drivers/gpu/drm/radeon/rs690.c
33938 +++ b/drivers/gpu/drm/radeon/rs690.c
33939 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33940 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33941 rdev->pm.sideport_bandwidth.full)
33942 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33943 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33944 + read_delay_latency.full = rfixed_const(800 * 1000);
33945 read_delay_latency.full = rfixed_div(read_delay_latency,
33946 rdev->pm.igp_sideport_mclk);
33947 + a.full = rfixed_const(370);
33948 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33949 } else {
33950 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33951 rdev->pm.k8_bandwidth.full)
33952 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33953 index 0ed436e..e6e7ce3 100644
33954 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33955 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33956 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33957 NULL
33958 };
33959
33960 -static struct sysfs_ops ttm_bo_global_ops = {
33961 +static const struct sysfs_ops ttm_bo_global_ops = {
33962 .show = &ttm_bo_global_show
33963 };
33964
33965 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33966 index 1c040d0..f9e4af8 100644
33967 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33968 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33969 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33970 {
33971 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33972 vma->vm_private_data;
33973 - struct ttm_bo_device *bdev = bo->bdev;
33974 + struct ttm_bo_device *bdev;
33975 unsigned long bus_base;
33976 unsigned long bus_offset;
33977 unsigned long bus_size;
33978 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33979 unsigned long address = (unsigned long)vmf->virtual_address;
33980 int retval = VM_FAULT_NOPAGE;
33981
33982 + if (!bo)
33983 + return VM_FAULT_NOPAGE;
33984 + bdev = bo->bdev;
33985 +
33986 /*
33987 * Work around locking order reversal in fault / nopfn
33988 * between mmap_sem and bo_reserve: Perform a trylock operation
33989 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33990 index b170071..28ae90e 100644
33991 --- a/drivers/gpu/drm/ttm/ttm_global.c
33992 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33993 @@ -36,7 +36,7 @@
33994 struct ttm_global_item {
33995 struct mutex mutex;
33996 void *object;
33997 - int refcount;
33998 + atomic_t refcount;
33999 };
34000
34001 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
34002 @@ -49,7 +49,7 @@ void ttm_global_init(void)
34003 struct ttm_global_item *item = &glob[i];
34004 mutex_init(&item->mutex);
34005 item->object = NULL;
34006 - item->refcount = 0;
34007 + atomic_set(&item->refcount, 0);
34008 }
34009 }
34010
34011 @@ -59,7 +59,7 @@ void ttm_global_release(void)
34012 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
34013 struct ttm_global_item *item = &glob[i];
34014 BUG_ON(item->object != NULL);
34015 - BUG_ON(item->refcount != 0);
34016 + BUG_ON(atomic_read(&item->refcount) != 0);
34017 }
34018 }
34019
34020 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34021 void *object;
34022
34023 mutex_lock(&item->mutex);
34024 - if (item->refcount == 0) {
34025 + if (atomic_read(&item->refcount) == 0) {
34026 item->object = kzalloc(ref->size, GFP_KERNEL);
34027 if (unlikely(item->object == NULL)) {
34028 ret = -ENOMEM;
34029 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34030 goto out_err;
34031
34032 }
34033 - ++item->refcount;
34034 + atomic_inc(&item->refcount);
34035 ref->object = item->object;
34036 object = item->object;
34037 mutex_unlock(&item->mutex);
34038 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
34039 struct ttm_global_item *item = &glob[ref->global_type];
34040
34041 mutex_lock(&item->mutex);
34042 - BUG_ON(item->refcount == 0);
34043 + BUG_ON(atomic_read(&item->refcount) == 0);
34044 BUG_ON(ref->object != item->object);
34045 - if (--item->refcount == 0) {
34046 + if (atomic_dec_and_test(&item->refcount)) {
34047 ref->release(ref);
34048 item->object = NULL;
34049 }
34050 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
34051 index 072c281..d8ef483 100644
34052 --- a/drivers/gpu/drm/ttm/ttm_memory.c
34053 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
34054 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
34055 NULL
34056 };
34057
34058 -static struct sysfs_ops ttm_mem_zone_ops = {
34059 +static const struct sysfs_ops ttm_mem_zone_ops = {
34060 .show = &ttm_mem_zone_show,
34061 .store = &ttm_mem_zone_store
34062 };
34063 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34064 index cafcb84..b8e66cc 100644
34065 --- a/drivers/gpu/drm/via/via_drv.h
34066 +++ b/drivers/gpu/drm/via/via_drv.h
34067 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34068 typedef uint32_t maskarray_t[5];
34069
34070 typedef struct drm_via_irq {
34071 - atomic_t irq_received;
34072 + atomic_unchecked_t irq_received;
34073 uint32_t pending_mask;
34074 uint32_t enable_mask;
34075 wait_queue_head_t irq_queue;
34076 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
34077 struct timeval last_vblank;
34078 int last_vblank_valid;
34079 unsigned usec_per_vblank;
34080 - atomic_t vbl_received;
34081 + atomic_unchecked_t vbl_received;
34082 drm_via_state_t hc_state;
34083 char pci_buf[VIA_PCI_BUF_SIZE];
34084 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34085 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34086 index 5935b88..127a8a6 100644
34087 --- a/drivers/gpu/drm/via/via_irq.c
34088 +++ b/drivers/gpu/drm/via/via_irq.c
34089 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34090 if (crtc != 0)
34091 return 0;
34092
34093 - return atomic_read(&dev_priv->vbl_received);
34094 + return atomic_read_unchecked(&dev_priv->vbl_received);
34095 }
34096
34097 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34098 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34099
34100 status = VIA_READ(VIA_REG_INTERRUPT);
34101 if (status & VIA_IRQ_VBLANK_PENDING) {
34102 - atomic_inc(&dev_priv->vbl_received);
34103 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34104 + atomic_inc_unchecked(&dev_priv->vbl_received);
34105 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34106 do_gettimeofday(&cur_vblank);
34107 if (dev_priv->last_vblank_valid) {
34108 dev_priv->usec_per_vblank =
34109 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34110 dev_priv->last_vblank = cur_vblank;
34111 dev_priv->last_vblank_valid = 1;
34112 }
34113 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34114 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34115 DRM_DEBUG("US per vblank is: %u\n",
34116 dev_priv->usec_per_vblank);
34117 }
34118 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34119
34120 for (i = 0; i < dev_priv->num_irqs; ++i) {
34121 if (status & cur_irq->pending_mask) {
34122 - atomic_inc(&cur_irq->irq_received);
34123 + atomic_inc_unchecked(&cur_irq->irq_received);
34124 DRM_WAKEUP(&cur_irq->irq_queue);
34125 handled = 1;
34126 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34127 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34128 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34129 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34130 masks[irq][4]));
34131 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34132 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34133 } else {
34134 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34135 (((cur_irq_sequence =
34136 - atomic_read(&cur_irq->irq_received)) -
34137 + atomic_read_unchecked(&cur_irq->irq_received)) -
34138 *sequence) <= (1 << 23)));
34139 }
34140 *sequence = cur_irq_sequence;
34141 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34142 }
34143
34144 for (i = 0; i < dev_priv->num_irqs; ++i) {
34145 - atomic_set(&cur_irq->irq_received, 0);
34146 + atomic_set_unchecked(&cur_irq->irq_received, 0);
34147 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34148 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34149 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34150 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34151 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34152 case VIA_IRQ_RELATIVE:
34153 irqwait->request.sequence +=
34154 - atomic_read(&cur_irq->irq_received);
34155 + atomic_read_unchecked(&cur_irq->irq_received);
34156 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34157 case VIA_IRQ_ABSOLUTE:
34158 break;
34159 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34160 index aa8688d..6a0140c 100644
34161 --- a/drivers/gpu/vga/vgaarb.c
34162 +++ b/drivers/gpu/vga/vgaarb.c
34163 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34164 uc = &priv->cards[i];
34165 }
34166
34167 - if (!uc)
34168 - return -EINVAL;
34169 + if (!uc) {
34170 + ret_val = -EINVAL;
34171 + goto done;
34172 + }
34173
34174 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34175 - return -EINVAL;
34176 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34177 + ret_val = -EINVAL;
34178 + goto done;
34179 + }
34180
34181 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34182 - return -EINVAL;
34183 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34184 + ret_val = -EINVAL;
34185 + goto done;
34186 + }
34187
34188 vga_put(pdev, io_state);
34189
34190 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34191 index 11f8069..4783396 100644
34192 --- a/drivers/hid/hid-core.c
34193 +++ b/drivers/hid/hid-core.c
34194 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34195
34196 int hid_add_device(struct hid_device *hdev)
34197 {
34198 - static atomic_t id = ATOMIC_INIT(0);
34199 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34200 int ret;
34201
34202 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34203 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34204 /* XXX hack, any other cleaner solution after the driver core
34205 * is converted to allow more than 20 bytes as the device name? */
34206 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34207 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34208 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34209
34210 ret = device_add(&hdev->dev);
34211 if (!ret)
34212 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34213 index 8b6ee24..70f657d 100644
34214 --- a/drivers/hid/usbhid/hiddev.c
34215 +++ b/drivers/hid/usbhid/hiddev.c
34216 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34217 return put_user(HID_VERSION, (int __user *)arg);
34218
34219 case HIDIOCAPPLICATION:
34220 - if (arg < 0 || arg >= hid->maxapplication)
34221 + if (arg >= hid->maxapplication)
34222 return -EINVAL;
34223
34224 for (i = 0; i < hid->maxcollection; i++)
34225 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34226 index 5d5ed69..f40533e 100644
34227 --- a/drivers/hwmon/lis3lv02d.c
34228 +++ b/drivers/hwmon/lis3lv02d.c
34229 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34230 * the lid is closed. This leads to interrupts as soon as a little move
34231 * is done.
34232 */
34233 - atomic_inc(&lis3_dev.count);
34234 + atomic_inc_unchecked(&lis3_dev.count);
34235
34236 wake_up_interruptible(&lis3_dev.misc_wait);
34237 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34238 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34239 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34240 return -EBUSY; /* already open */
34241
34242 - atomic_set(&lis3_dev.count, 0);
34243 + atomic_set_unchecked(&lis3_dev.count, 0);
34244
34245 /*
34246 * The sensor can generate interrupts for free-fall and direction
34247 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34248 add_wait_queue(&lis3_dev.misc_wait, &wait);
34249 while (true) {
34250 set_current_state(TASK_INTERRUPTIBLE);
34251 - data = atomic_xchg(&lis3_dev.count, 0);
34252 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34253 if (data)
34254 break;
34255
34256 @@ -244,7 +244,7 @@ out:
34257 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34258 {
34259 poll_wait(file, &lis3_dev.misc_wait, wait);
34260 - if (atomic_read(&lis3_dev.count))
34261 + if (atomic_read_unchecked(&lis3_dev.count))
34262 return POLLIN | POLLRDNORM;
34263 return 0;
34264 }
34265 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34266 index 7cdd76f..fe0efdf 100644
34267 --- a/drivers/hwmon/lis3lv02d.h
34268 +++ b/drivers/hwmon/lis3lv02d.h
34269 @@ -201,7 +201,7 @@ struct lis3lv02d {
34270
34271 struct input_polled_dev *idev; /* input device */
34272 struct platform_device *pdev; /* platform device */
34273 - atomic_t count; /* interrupt count after last read */
34274 + atomic_unchecked_t count; /* interrupt count after last read */
34275 int xcalib; /* calibrated null value for x */
34276 int ycalib; /* calibrated null value for y */
34277 int zcalib; /* calibrated null value for z */
34278 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34279 index 740785e..5a5c6c6 100644
34280 --- a/drivers/hwmon/sht15.c
34281 +++ b/drivers/hwmon/sht15.c
34282 @@ -112,7 +112,7 @@ struct sht15_data {
34283 int supply_uV;
34284 int supply_uV_valid;
34285 struct work_struct update_supply_work;
34286 - atomic_t interrupt_handled;
34287 + atomic_unchecked_t interrupt_handled;
34288 };
34289
34290 /**
34291 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34292 return ret;
34293
34294 gpio_direction_input(data->pdata->gpio_data);
34295 - atomic_set(&data->interrupt_handled, 0);
34296 + atomic_set_unchecked(&data->interrupt_handled, 0);
34297
34298 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34299 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34300 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34301 /* Only relevant if the interrupt hasn't occured. */
34302 - if (!atomic_read(&data->interrupt_handled))
34303 + if (!atomic_read_unchecked(&data->interrupt_handled))
34304 schedule_work(&data->read_work);
34305 }
34306 ret = wait_event_timeout(data->wait_queue,
34307 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34308 struct sht15_data *data = d;
34309 /* First disable the interrupt */
34310 disable_irq_nosync(irq);
34311 - atomic_inc(&data->interrupt_handled);
34312 + atomic_inc_unchecked(&data->interrupt_handled);
34313 /* Then schedule a reading work struct */
34314 if (data->flag != SHT15_READING_NOTHING)
34315 schedule_work(&data->read_work);
34316 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34317 here as could have gone low in meantime so verify
34318 it hasn't!
34319 */
34320 - atomic_set(&data->interrupt_handled, 0);
34321 + atomic_set_unchecked(&data->interrupt_handled, 0);
34322 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34323 /* If still not occured or another handler has been scheduled */
34324 if (gpio_get_value(data->pdata->gpio_data)
34325 - || atomic_read(&data->interrupt_handled))
34326 + || atomic_read_unchecked(&data->interrupt_handled))
34327 return;
34328 }
34329 /* Read the data back from the device */
34330 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34331 index 97851c5..cb40626 100644
34332 --- a/drivers/hwmon/w83791d.c
34333 +++ b/drivers/hwmon/w83791d.c
34334 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34335 struct i2c_board_info *info);
34336 static int w83791d_remove(struct i2c_client *client);
34337
34338 -static int w83791d_read(struct i2c_client *client, u8 register);
34339 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34340 +static int w83791d_read(struct i2c_client *client, u8 reg);
34341 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34342 static struct w83791d_data *w83791d_update_device(struct device *dev);
34343
34344 #ifdef DEBUG
34345 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34346 index 378fcb5..5e91fa8 100644
34347 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34348 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34349 @@ -43,7 +43,7 @@
34350 extern struct i2c_adapter amd756_smbus;
34351
34352 static struct i2c_adapter *s4882_adapter;
34353 -static struct i2c_algorithm *s4882_algo;
34354 +static i2c_algorithm_no_const *s4882_algo;
34355
34356 /* Wrapper access functions for multiplexed SMBus */
34357 static DEFINE_MUTEX(amd756_lock);
34358 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34359 index 29015eb..af2d8e9 100644
34360 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34361 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34362 @@ -41,7 +41,7 @@
34363 extern struct i2c_adapter *nforce2_smbus;
34364
34365 static struct i2c_adapter *s4985_adapter;
34366 -static struct i2c_algorithm *s4985_algo;
34367 +static i2c_algorithm_no_const *s4985_algo;
34368
34369 /* Wrapper access functions for multiplexed SMBus */
34370 static DEFINE_MUTEX(nforce2_lock);
34371 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34372 index 878f8ec..12376fc 100644
34373 --- a/drivers/ide/aec62xx.c
34374 +++ b/drivers/ide/aec62xx.c
34375 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34376 .cable_detect = atp86x_cable_detect,
34377 };
34378
34379 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34380 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34381 { /* 0: AEC6210 */
34382 .name = DRV_NAME,
34383 .init_chipset = init_chipset_aec62xx,
34384 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34385 index e59b6de..4b4fc65 100644
34386 --- a/drivers/ide/alim15x3.c
34387 +++ b/drivers/ide/alim15x3.c
34388 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34389 .dma_sff_read_status = ide_dma_sff_read_status,
34390 };
34391
34392 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34393 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34394 .name = DRV_NAME,
34395 .init_chipset = init_chipset_ali15x3,
34396 .init_hwif = init_hwif_ali15x3,
34397 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34398 index 628cd2e..087a414 100644
34399 --- a/drivers/ide/amd74xx.c
34400 +++ b/drivers/ide/amd74xx.c
34401 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34402 .udma_mask = udma, \
34403 }
34404
34405 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34406 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34407 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34408 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34409 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34410 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34411 index 837322b..837fd71 100644
34412 --- a/drivers/ide/atiixp.c
34413 +++ b/drivers/ide/atiixp.c
34414 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34415 .cable_detect = atiixp_cable_detect,
34416 };
34417
34418 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34419 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34420 { /* 0: IXP200/300/400/700 */
34421 .name = DRV_NAME,
34422 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34423 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34424 index ca0c46f..d55318a 100644
34425 --- a/drivers/ide/cmd64x.c
34426 +++ b/drivers/ide/cmd64x.c
34427 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34428 .dma_sff_read_status = ide_dma_sff_read_status,
34429 };
34430
34431 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34432 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34433 { /* 0: CMD643 */
34434 .name = DRV_NAME,
34435 .init_chipset = init_chipset_cmd64x,
34436 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34437 index 09f98ed..cebc5bc 100644
34438 --- a/drivers/ide/cs5520.c
34439 +++ b/drivers/ide/cs5520.c
34440 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34441 .set_dma_mode = cs5520_set_dma_mode,
34442 };
34443
34444 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34445 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34446 .name = DRV_NAME,
34447 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34448 .port_ops = &cs5520_port_ops,
34449 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34450 index 40bf05e..7d58ca0 100644
34451 --- a/drivers/ide/cs5530.c
34452 +++ b/drivers/ide/cs5530.c
34453 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34454 .udma_filter = cs5530_udma_filter,
34455 };
34456
34457 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34458 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34459 .name = DRV_NAME,
34460 .init_chipset = init_chipset_cs5530,
34461 .init_hwif = init_hwif_cs5530,
34462 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34463 index 983d957..53e6172 100644
34464 --- a/drivers/ide/cs5535.c
34465 +++ b/drivers/ide/cs5535.c
34466 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34467 .cable_detect = cs5535_cable_detect,
34468 };
34469
34470 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34471 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34472 .name = DRV_NAME,
34473 .port_ops = &cs5535_port_ops,
34474 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34475 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34476 index 74fc540..8e933d8 100644
34477 --- a/drivers/ide/cy82c693.c
34478 +++ b/drivers/ide/cy82c693.c
34479 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34480 .set_dma_mode = cy82c693_set_dma_mode,
34481 };
34482
34483 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34484 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34485 .name = DRV_NAME,
34486 .init_iops = init_iops_cy82c693,
34487 .port_ops = &cy82c693_port_ops,
34488 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34489 index 7ce68ef..e78197d 100644
34490 --- a/drivers/ide/hpt366.c
34491 +++ b/drivers/ide/hpt366.c
34492 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34493 }
34494 };
34495
34496 -static const struct hpt_info hpt36x __devinitdata = {
34497 +static const struct hpt_info hpt36x __devinitconst = {
34498 .chip_name = "HPT36x",
34499 .chip_type = HPT36x,
34500 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34501 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34502 .timings = &hpt36x_timings
34503 };
34504
34505 -static const struct hpt_info hpt370 __devinitdata = {
34506 +static const struct hpt_info hpt370 __devinitconst = {
34507 .chip_name = "HPT370",
34508 .chip_type = HPT370,
34509 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34510 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34511 .timings = &hpt37x_timings
34512 };
34513
34514 -static const struct hpt_info hpt370a __devinitdata = {
34515 +static const struct hpt_info hpt370a __devinitconst = {
34516 .chip_name = "HPT370A",
34517 .chip_type = HPT370A,
34518 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34519 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34520 .timings = &hpt37x_timings
34521 };
34522
34523 -static const struct hpt_info hpt374 __devinitdata = {
34524 +static const struct hpt_info hpt374 __devinitconst = {
34525 .chip_name = "HPT374",
34526 .chip_type = HPT374,
34527 .udma_mask = ATA_UDMA5,
34528 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34529 .timings = &hpt37x_timings
34530 };
34531
34532 -static const struct hpt_info hpt372 __devinitdata = {
34533 +static const struct hpt_info hpt372 __devinitconst = {
34534 .chip_name = "HPT372",
34535 .chip_type = HPT372,
34536 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34537 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34538 .timings = &hpt37x_timings
34539 };
34540
34541 -static const struct hpt_info hpt372a __devinitdata = {
34542 +static const struct hpt_info hpt372a __devinitconst = {
34543 .chip_name = "HPT372A",
34544 .chip_type = HPT372A,
34545 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34546 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34547 .timings = &hpt37x_timings
34548 };
34549
34550 -static const struct hpt_info hpt302 __devinitdata = {
34551 +static const struct hpt_info hpt302 __devinitconst = {
34552 .chip_name = "HPT302",
34553 .chip_type = HPT302,
34554 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34555 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34556 .timings = &hpt37x_timings
34557 };
34558
34559 -static const struct hpt_info hpt371 __devinitdata = {
34560 +static const struct hpt_info hpt371 __devinitconst = {
34561 .chip_name = "HPT371",
34562 .chip_type = HPT371,
34563 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34564 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34565 .timings = &hpt37x_timings
34566 };
34567
34568 -static const struct hpt_info hpt372n __devinitdata = {
34569 +static const struct hpt_info hpt372n __devinitconst = {
34570 .chip_name = "HPT372N",
34571 .chip_type = HPT372N,
34572 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34573 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34574 .timings = &hpt37x_timings
34575 };
34576
34577 -static const struct hpt_info hpt302n __devinitdata = {
34578 +static const struct hpt_info hpt302n __devinitconst = {
34579 .chip_name = "HPT302N",
34580 .chip_type = HPT302N,
34581 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34582 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34583 .timings = &hpt37x_timings
34584 };
34585
34586 -static const struct hpt_info hpt371n __devinitdata = {
34587 +static const struct hpt_info hpt371n __devinitconst = {
34588 .chip_name = "HPT371N",
34589 .chip_type = HPT371N,
34590 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34591 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34592 .dma_sff_read_status = ide_dma_sff_read_status,
34593 };
34594
34595 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34596 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34597 { /* 0: HPT36x */
34598 .name = DRV_NAME,
34599 .init_chipset = init_chipset_hpt366,
34600 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34601 index 2de76cc..74186a1 100644
34602 --- a/drivers/ide/ide-cd.c
34603 +++ b/drivers/ide/ide-cd.c
34604 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34605 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34606 if ((unsigned long)buf & alignment
34607 || blk_rq_bytes(rq) & q->dma_pad_mask
34608 - || object_is_on_stack(buf))
34609 + || object_starts_on_stack(buf))
34610 drive->dma = 0;
34611 }
34612 }
34613 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34614 index fefbdfc..62ff465 100644
34615 --- a/drivers/ide/ide-floppy.c
34616 +++ b/drivers/ide/ide-floppy.c
34617 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34618 u8 pc_buf[256], header_len, desc_cnt;
34619 int i, rc = 1, blocks, length;
34620
34621 + pax_track_stack();
34622 +
34623 ide_debug_log(IDE_DBG_FUNC, "enter");
34624
34625 drive->bios_cyl = 0;
34626 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34627 index 39d4e01..11538ce 100644
34628 --- a/drivers/ide/ide-pci-generic.c
34629 +++ b/drivers/ide/ide-pci-generic.c
34630 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34631 .udma_mask = ATA_UDMA6, \
34632 }
34633
34634 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34635 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34636 /* 0: Unknown */
34637 DECLARE_GENERIC_PCI_DEV(0),
34638
34639 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34640 index 0d266a5..aaca790 100644
34641 --- a/drivers/ide/it8172.c
34642 +++ b/drivers/ide/it8172.c
34643 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34644 .set_dma_mode = it8172_set_dma_mode,
34645 };
34646
34647 -static const struct ide_port_info it8172_port_info __devinitdata = {
34648 +static const struct ide_port_info it8172_port_info __devinitconst = {
34649 .name = DRV_NAME,
34650 .port_ops = &it8172_port_ops,
34651 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34652 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34653 index 4797616..4be488a 100644
34654 --- a/drivers/ide/it8213.c
34655 +++ b/drivers/ide/it8213.c
34656 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34657 .cable_detect = it8213_cable_detect,
34658 };
34659
34660 -static const struct ide_port_info it8213_chipset __devinitdata = {
34661 +static const struct ide_port_info it8213_chipset __devinitconst = {
34662 .name = DRV_NAME,
34663 .enablebits = { {0x41, 0x80, 0x80} },
34664 .port_ops = &it8213_port_ops,
34665 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34666 index 51aa745..146ee60 100644
34667 --- a/drivers/ide/it821x.c
34668 +++ b/drivers/ide/it821x.c
34669 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34670 .cable_detect = it821x_cable_detect,
34671 };
34672
34673 -static const struct ide_port_info it821x_chipset __devinitdata = {
34674 +static const struct ide_port_info it821x_chipset __devinitconst = {
34675 .name = DRV_NAME,
34676 .init_chipset = init_chipset_it821x,
34677 .init_hwif = init_hwif_it821x,
34678 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34679 index bf2be64..9270098 100644
34680 --- a/drivers/ide/jmicron.c
34681 +++ b/drivers/ide/jmicron.c
34682 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34683 .cable_detect = jmicron_cable_detect,
34684 };
34685
34686 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34687 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34688 .name = DRV_NAME,
34689 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34690 .port_ops = &jmicron_port_ops,
34691 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34692 index 95327a2..73f78d8 100644
34693 --- a/drivers/ide/ns87415.c
34694 +++ b/drivers/ide/ns87415.c
34695 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34696 .dma_sff_read_status = superio_dma_sff_read_status,
34697 };
34698
34699 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34700 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34701 .name = DRV_NAME,
34702 .init_hwif = init_hwif_ns87415,
34703 .tp_ops = &ns87415_tp_ops,
34704 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34705 index f1d70d6..e1de05b 100644
34706 --- a/drivers/ide/opti621.c
34707 +++ b/drivers/ide/opti621.c
34708 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34709 .set_pio_mode = opti621_set_pio_mode,
34710 };
34711
34712 -static const struct ide_port_info opti621_chipset __devinitdata = {
34713 +static const struct ide_port_info opti621_chipset __devinitconst = {
34714 .name = DRV_NAME,
34715 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34716 .port_ops = &opti621_port_ops,
34717 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34718 index 65ba823..7311f4d 100644
34719 --- a/drivers/ide/pdc202xx_new.c
34720 +++ b/drivers/ide/pdc202xx_new.c
34721 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34722 .udma_mask = udma, \
34723 }
34724
34725 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34726 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34727 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34728 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34729 };
34730 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34731 index cb812f3..af816ef 100644
34732 --- a/drivers/ide/pdc202xx_old.c
34733 +++ b/drivers/ide/pdc202xx_old.c
34734 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34735 .max_sectors = sectors, \
34736 }
34737
34738 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34739 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34740 { /* 0: PDC20246 */
34741 .name = DRV_NAME,
34742 .init_chipset = init_chipset_pdc202xx,
34743 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34744 index bf14f39..15c4b98 100644
34745 --- a/drivers/ide/piix.c
34746 +++ b/drivers/ide/piix.c
34747 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34748 .udma_mask = udma, \
34749 }
34750
34751 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34752 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34753 /* 0: MPIIX */
34754 { /*
34755 * MPIIX actually has only a single IDE channel mapped to
34756 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34757 index a6414a8..c04173e 100644
34758 --- a/drivers/ide/rz1000.c
34759 +++ b/drivers/ide/rz1000.c
34760 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34761 }
34762 }
34763
34764 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34765 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34766 .name = DRV_NAME,
34767 .host_flags = IDE_HFLAG_NO_DMA,
34768 };
34769 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34770 index d467478..9203942 100644
34771 --- a/drivers/ide/sc1200.c
34772 +++ b/drivers/ide/sc1200.c
34773 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34774 .dma_sff_read_status = ide_dma_sff_read_status,
34775 };
34776
34777 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34778 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34779 .name = DRV_NAME,
34780 .port_ops = &sc1200_port_ops,
34781 .dma_ops = &sc1200_dma_ops,
34782 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34783 index 1104bb3..59c5194 100644
34784 --- a/drivers/ide/scc_pata.c
34785 +++ b/drivers/ide/scc_pata.c
34786 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34787 .dma_sff_read_status = scc_dma_sff_read_status,
34788 };
34789
34790 -static const struct ide_port_info scc_chipset __devinitdata = {
34791 +static const struct ide_port_info scc_chipset __devinitconst = {
34792 .name = "sccIDE",
34793 .init_iops = init_iops_scc,
34794 .init_dma = scc_init_dma,
34795 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34796 index b6554ef..6cc2cc3 100644
34797 --- a/drivers/ide/serverworks.c
34798 +++ b/drivers/ide/serverworks.c
34799 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34800 .cable_detect = svwks_cable_detect,
34801 };
34802
34803 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34804 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34805 { /* 0: OSB4 */
34806 .name = DRV_NAME,
34807 .init_chipset = init_chipset_svwks,
34808 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34809 index ab3db61..afed580 100644
34810 --- a/drivers/ide/setup-pci.c
34811 +++ b/drivers/ide/setup-pci.c
34812 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34813 int ret, i, n_ports = dev2 ? 4 : 2;
34814 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34815
34816 + pax_track_stack();
34817 +
34818 for (i = 0; i < n_ports / 2; i++) {
34819 ret = ide_setup_pci_controller(pdev[i], d, !i);
34820 if (ret < 0)
34821 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34822 index d95df52..0b03a39 100644
34823 --- a/drivers/ide/siimage.c
34824 +++ b/drivers/ide/siimage.c
34825 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34826 .udma_mask = ATA_UDMA6, \
34827 }
34828
34829 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34830 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34831 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34832 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34833 };
34834 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34835 index 3b88eba..ca8699d 100644
34836 --- a/drivers/ide/sis5513.c
34837 +++ b/drivers/ide/sis5513.c
34838 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34839 .cable_detect = sis_cable_detect,
34840 };
34841
34842 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34843 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34844 .name = DRV_NAME,
34845 .init_chipset = init_chipset_sis5513,
34846 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34847 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34848 index d698da4..fca42a4 100644
34849 --- a/drivers/ide/sl82c105.c
34850 +++ b/drivers/ide/sl82c105.c
34851 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34852 .dma_sff_read_status = ide_dma_sff_read_status,
34853 };
34854
34855 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34856 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34857 .name = DRV_NAME,
34858 .init_chipset = init_chipset_sl82c105,
34859 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34860 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34861 index 1ccfb40..83d5779 100644
34862 --- a/drivers/ide/slc90e66.c
34863 +++ b/drivers/ide/slc90e66.c
34864 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34865 .cable_detect = slc90e66_cable_detect,
34866 };
34867
34868 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34869 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34870 .name = DRV_NAME,
34871 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34872 .port_ops = &slc90e66_port_ops,
34873 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34874 index 05a93d6..5f9e325 100644
34875 --- a/drivers/ide/tc86c001.c
34876 +++ b/drivers/ide/tc86c001.c
34877 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34878 .dma_sff_read_status = ide_dma_sff_read_status,
34879 };
34880
34881 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34882 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34883 .name = DRV_NAME,
34884 .init_hwif = init_hwif_tc86c001,
34885 .port_ops = &tc86c001_port_ops,
34886 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34887 index 8773c3b..7907d6c 100644
34888 --- a/drivers/ide/triflex.c
34889 +++ b/drivers/ide/triflex.c
34890 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34891 .set_dma_mode = triflex_set_mode,
34892 };
34893
34894 -static const struct ide_port_info triflex_device __devinitdata = {
34895 +static const struct ide_port_info triflex_device __devinitconst = {
34896 .name = DRV_NAME,
34897 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34898 .port_ops = &triflex_port_ops,
34899 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34900 index 4b42ca0..e494a98 100644
34901 --- a/drivers/ide/trm290.c
34902 +++ b/drivers/ide/trm290.c
34903 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34904 .dma_check = trm290_dma_check,
34905 };
34906
34907 -static const struct ide_port_info trm290_chipset __devinitdata = {
34908 +static const struct ide_port_info trm290_chipset __devinitconst = {
34909 .name = DRV_NAME,
34910 .init_hwif = init_hwif_trm290,
34911 .tp_ops = &trm290_tp_ops,
34912 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34913 index 028de26..520d5d5 100644
34914 --- a/drivers/ide/via82cxxx.c
34915 +++ b/drivers/ide/via82cxxx.c
34916 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34917 .cable_detect = via82cxxx_cable_detect,
34918 };
34919
34920 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34921 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34922 .name = DRV_NAME,
34923 .init_chipset = init_chipset_via82cxxx,
34924 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34925 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34926 index 2cd00b5..14de699 100644
34927 --- a/drivers/ieee1394/dv1394.c
34928 +++ b/drivers/ieee1394/dv1394.c
34929 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34930 based upon DIF section and sequence
34931 */
34932
34933 -static void inline
34934 +static inline void
34935 frame_put_packet (struct frame *f, struct packet *p)
34936 {
34937 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34938 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34939 index e947d8f..6a966b9 100644
34940 --- a/drivers/ieee1394/hosts.c
34941 +++ b/drivers/ieee1394/hosts.c
34942 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34943 }
34944
34945 static struct hpsb_host_driver dummy_driver = {
34946 + .name = "dummy",
34947 .transmit_packet = dummy_transmit_packet,
34948 .devctl = dummy_devctl,
34949 .isoctl = dummy_isoctl
34950 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34951 index ddaab6e..8d37435 100644
34952 --- a/drivers/ieee1394/init_ohci1394_dma.c
34953 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34954 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34955 for (func = 0; func < 8; func++) {
34956 u32 class = read_pci_config(num,slot,func,
34957 PCI_CLASS_REVISION);
34958 - if ((class == 0xffffffff))
34959 + if (class == 0xffffffff)
34960 continue; /* No device at this func */
34961
34962 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34963 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34964 index 65c1429..5d8c11f 100644
34965 --- a/drivers/ieee1394/ohci1394.c
34966 +++ b/drivers/ieee1394/ohci1394.c
34967 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34968 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34969
34970 /* Module Parameters */
34971 -static int phys_dma = 1;
34972 +static int phys_dma;
34973 module_param(phys_dma, int, 0444);
34974 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34975 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34976
34977 static void dma_trm_tasklet(unsigned long data);
34978 static void dma_trm_reset(struct dma_trm_ctx *d);
34979 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34980 index f199896..78c9fc8 100644
34981 --- a/drivers/ieee1394/sbp2.c
34982 +++ b/drivers/ieee1394/sbp2.c
34983 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34984 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34985 MODULE_LICENSE("GPL");
34986
34987 -static int sbp2_module_init(void)
34988 +static int __init sbp2_module_init(void)
34989 {
34990 int ret;
34991
34992 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34993 index a5dea6b..0cefe8f 100644
34994 --- a/drivers/infiniband/core/cm.c
34995 +++ b/drivers/infiniband/core/cm.c
34996 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34997
34998 struct cm_counter_group {
34999 struct kobject obj;
35000 - atomic_long_t counter[CM_ATTR_COUNT];
35001 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
35002 };
35003
35004 struct cm_counter_attribute {
35005 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
35006 struct ib_mad_send_buf *msg = NULL;
35007 int ret;
35008
35009 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35010 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35011 counter[CM_REQ_COUNTER]);
35012
35013 /* Quick state check to discard duplicate REQs. */
35014 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
35015 if (!cm_id_priv)
35016 return;
35017
35018 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35019 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35020 counter[CM_REP_COUNTER]);
35021 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
35022 if (ret)
35023 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
35024 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
35025 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
35026 spin_unlock_irq(&cm_id_priv->lock);
35027 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35028 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35029 counter[CM_RTU_COUNTER]);
35030 goto out;
35031 }
35032 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
35033 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
35034 dreq_msg->local_comm_id);
35035 if (!cm_id_priv) {
35036 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35037 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35038 counter[CM_DREQ_COUNTER]);
35039 cm_issue_drep(work->port, work->mad_recv_wc);
35040 return -EINVAL;
35041 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
35042 case IB_CM_MRA_REP_RCVD:
35043 break;
35044 case IB_CM_TIMEWAIT:
35045 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35046 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35047 counter[CM_DREQ_COUNTER]);
35048 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35049 goto unlock;
35050 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
35051 cm_free_msg(msg);
35052 goto deref;
35053 case IB_CM_DREQ_RCVD:
35054 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35055 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35056 counter[CM_DREQ_COUNTER]);
35057 goto unlock;
35058 default:
35059 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
35060 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35061 cm_id_priv->msg, timeout)) {
35062 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35063 - atomic_long_inc(&work->port->
35064 + atomic_long_inc_unchecked(&work->port->
35065 counter_group[CM_RECV_DUPLICATES].
35066 counter[CM_MRA_COUNTER]);
35067 goto out;
35068 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35069 break;
35070 case IB_CM_MRA_REQ_RCVD:
35071 case IB_CM_MRA_REP_RCVD:
35072 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35073 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35074 counter[CM_MRA_COUNTER]);
35075 /* fall through */
35076 default:
35077 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35078 case IB_CM_LAP_IDLE:
35079 break;
35080 case IB_CM_MRA_LAP_SENT:
35081 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35082 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35083 counter[CM_LAP_COUNTER]);
35084 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35085 goto unlock;
35086 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35087 cm_free_msg(msg);
35088 goto deref;
35089 case IB_CM_LAP_RCVD:
35090 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35091 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35092 counter[CM_LAP_COUNTER]);
35093 goto unlock;
35094 default:
35095 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35096 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35097 if (cur_cm_id_priv) {
35098 spin_unlock_irq(&cm.lock);
35099 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35100 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35101 counter[CM_SIDR_REQ_COUNTER]);
35102 goto out; /* Duplicate message. */
35103 }
35104 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35105 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35106 msg->retries = 1;
35107
35108 - atomic_long_add(1 + msg->retries,
35109 + atomic_long_add_unchecked(1 + msg->retries,
35110 &port->counter_group[CM_XMIT].counter[attr_index]);
35111 if (msg->retries)
35112 - atomic_long_add(msg->retries,
35113 + atomic_long_add_unchecked(msg->retries,
35114 &port->counter_group[CM_XMIT_RETRIES].
35115 counter[attr_index]);
35116
35117 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35118 }
35119
35120 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35121 - atomic_long_inc(&port->counter_group[CM_RECV].
35122 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35123 counter[attr_id - CM_ATTR_ID_OFFSET]);
35124
35125 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35126 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35127 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35128
35129 return sprintf(buf, "%ld\n",
35130 - atomic_long_read(&group->counter[cm_attr->index]));
35131 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35132 }
35133
35134 -static struct sysfs_ops cm_counter_ops = {
35135 +static const struct sysfs_ops cm_counter_ops = {
35136 .show = cm_show_counter
35137 };
35138
35139 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35140 index 8fd3a6f..61d8075 100644
35141 --- a/drivers/infiniband/core/cma.c
35142 +++ b/drivers/infiniband/core/cma.c
35143 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35144
35145 req.private_data_len = sizeof(struct cma_hdr) +
35146 conn_param->private_data_len;
35147 + if (req.private_data_len < conn_param->private_data_len)
35148 + return -EINVAL;
35149 +
35150 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35151 if (!req.private_data)
35152 return -ENOMEM;
35153 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35154 memset(&req, 0, sizeof req);
35155 offset = cma_user_data_offset(id_priv->id.ps);
35156 req.private_data_len = offset + conn_param->private_data_len;
35157 + if (req.private_data_len < conn_param->private_data_len)
35158 + return -EINVAL;
35159 +
35160 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35161 if (!private_data)
35162 return -ENOMEM;
35163 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35164 index 4507043..14ad522 100644
35165 --- a/drivers/infiniband/core/fmr_pool.c
35166 +++ b/drivers/infiniband/core/fmr_pool.c
35167 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
35168
35169 struct task_struct *thread;
35170
35171 - atomic_t req_ser;
35172 - atomic_t flush_ser;
35173 + atomic_unchecked_t req_ser;
35174 + atomic_unchecked_t flush_ser;
35175
35176 wait_queue_head_t force_wait;
35177 };
35178 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35179 struct ib_fmr_pool *pool = pool_ptr;
35180
35181 do {
35182 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35183 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35184 ib_fmr_batch_release(pool);
35185
35186 - atomic_inc(&pool->flush_ser);
35187 + atomic_inc_unchecked(&pool->flush_ser);
35188 wake_up_interruptible(&pool->force_wait);
35189
35190 if (pool->flush_function)
35191 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35192 }
35193
35194 set_current_state(TASK_INTERRUPTIBLE);
35195 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35196 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35197 !kthread_should_stop())
35198 schedule();
35199 __set_current_state(TASK_RUNNING);
35200 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35201 pool->dirty_watermark = params->dirty_watermark;
35202 pool->dirty_len = 0;
35203 spin_lock_init(&pool->pool_lock);
35204 - atomic_set(&pool->req_ser, 0);
35205 - atomic_set(&pool->flush_ser, 0);
35206 + atomic_set_unchecked(&pool->req_ser, 0);
35207 + atomic_set_unchecked(&pool->flush_ser, 0);
35208 init_waitqueue_head(&pool->force_wait);
35209
35210 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35211 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35212 }
35213 spin_unlock_irq(&pool->pool_lock);
35214
35215 - serial = atomic_inc_return(&pool->req_ser);
35216 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35217 wake_up_process(pool->thread);
35218
35219 if (wait_event_interruptible(pool->force_wait,
35220 - atomic_read(&pool->flush_ser) - serial >= 0))
35221 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35222 return -EINTR;
35223
35224 return 0;
35225 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35226 } else {
35227 list_add_tail(&fmr->list, &pool->dirty_list);
35228 if (++pool->dirty_len >= pool->dirty_watermark) {
35229 - atomic_inc(&pool->req_ser);
35230 + atomic_inc_unchecked(&pool->req_ser);
35231 wake_up_process(pool->thread);
35232 }
35233 }
35234 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35235 index 158a214..1558bb7 100644
35236 --- a/drivers/infiniband/core/sysfs.c
35237 +++ b/drivers/infiniband/core/sysfs.c
35238 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35239 return port_attr->show(p, port_attr, buf);
35240 }
35241
35242 -static struct sysfs_ops port_sysfs_ops = {
35243 +static const struct sysfs_ops port_sysfs_ops = {
35244 .show = port_attr_show
35245 };
35246
35247 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35248 index 5440da0..1194ecb 100644
35249 --- a/drivers/infiniband/core/uverbs_marshall.c
35250 +++ b/drivers/infiniband/core/uverbs_marshall.c
35251 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35252 dst->grh.sgid_index = src->grh.sgid_index;
35253 dst->grh.hop_limit = src->grh.hop_limit;
35254 dst->grh.traffic_class = src->grh.traffic_class;
35255 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35256 dst->dlid = src->dlid;
35257 dst->sl = src->sl;
35258 dst->src_path_bits = src->src_path_bits;
35259 dst->static_rate = src->static_rate;
35260 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35261 dst->port_num = src->port_num;
35262 + dst->reserved = 0;
35263 }
35264 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35265
35266 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35267 struct ib_qp_attr *src)
35268 {
35269 + dst->qp_state = src->qp_state;
35270 dst->cur_qp_state = src->cur_qp_state;
35271 dst->path_mtu = src->path_mtu;
35272 dst->path_mig_state = src->path_mig_state;
35273 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35274 dst->rnr_retry = src->rnr_retry;
35275 dst->alt_port_num = src->alt_port_num;
35276 dst->alt_timeout = src->alt_timeout;
35277 + memset(dst->reserved, 0, sizeof(dst->reserved));
35278 }
35279 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35280
35281 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35282 index 100da85..62e6b88 100644
35283 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35284 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35285 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35286 struct infinipath_counters counters;
35287 struct ipath_devdata *dd;
35288
35289 + pax_track_stack();
35290 +
35291 dd = file->f_path.dentry->d_inode->i_private;
35292 dd->ipath_f_read_counters(dd, &counters);
35293
35294 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35295 index cbde0cf..afaf55c 100644
35296 --- a/drivers/infiniband/hw/nes/nes.c
35297 +++ b/drivers/infiniband/hw/nes/nes.c
35298 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35299 LIST_HEAD(nes_adapter_list);
35300 static LIST_HEAD(nes_dev_list);
35301
35302 -atomic_t qps_destroyed;
35303 +atomic_unchecked_t qps_destroyed;
35304
35305 static unsigned int ee_flsh_adapter;
35306 static unsigned int sysfs_nonidx_addr;
35307 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35308 struct nes_adapter *nesadapter = nesdev->nesadapter;
35309 u32 qp_id;
35310
35311 - atomic_inc(&qps_destroyed);
35312 + atomic_inc_unchecked(&qps_destroyed);
35313
35314 /* Free the control structures */
35315
35316 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35317 index bcc6abc..9c76b2f 100644
35318 --- a/drivers/infiniband/hw/nes/nes.h
35319 +++ b/drivers/infiniband/hw/nes/nes.h
35320 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35321 extern unsigned int wqm_quanta;
35322 extern struct list_head nes_adapter_list;
35323
35324 -extern atomic_t cm_connects;
35325 -extern atomic_t cm_accepts;
35326 -extern atomic_t cm_disconnects;
35327 -extern atomic_t cm_closes;
35328 -extern atomic_t cm_connecteds;
35329 -extern atomic_t cm_connect_reqs;
35330 -extern atomic_t cm_rejects;
35331 -extern atomic_t mod_qp_timouts;
35332 -extern atomic_t qps_created;
35333 -extern atomic_t qps_destroyed;
35334 -extern atomic_t sw_qps_destroyed;
35335 +extern atomic_unchecked_t cm_connects;
35336 +extern atomic_unchecked_t cm_accepts;
35337 +extern atomic_unchecked_t cm_disconnects;
35338 +extern atomic_unchecked_t cm_closes;
35339 +extern atomic_unchecked_t cm_connecteds;
35340 +extern atomic_unchecked_t cm_connect_reqs;
35341 +extern atomic_unchecked_t cm_rejects;
35342 +extern atomic_unchecked_t mod_qp_timouts;
35343 +extern atomic_unchecked_t qps_created;
35344 +extern atomic_unchecked_t qps_destroyed;
35345 +extern atomic_unchecked_t sw_qps_destroyed;
35346 extern u32 mh_detected;
35347 extern u32 mh_pauses_sent;
35348 extern u32 cm_packets_sent;
35349 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35350 extern u32 cm_listens_created;
35351 extern u32 cm_listens_destroyed;
35352 extern u32 cm_backlog_drops;
35353 -extern atomic_t cm_loopbacks;
35354 -extern atomic_t cm_nodes_created;
35355 -extern atomic_t cm_nodes_destroyed;
35356 -extern atomic_t cm_accel_dropped_pkts;
35357 -extern atomic_t cm_resets_recvd;
35358 +extern atomic_unchecked_t cm_loopbacks;
35359 +extern atomic_unchecked_t cm_nodes_created;
35360 +extern atomic_unchecked_t cm_nodes_destroyed;
35361 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35362 +extern atomic_unchecked_t cm_resets_recvd;
35363
35364 extern u32 int_mod_timer_init;
35365 extern u32 int_mod_cq_depth_256;
35366 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35367 index 73473db..5ed06e8 100644
35368 --- a/drivers/infiniband/hw/nes/nes_cm.c
35369 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35370 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35371 u32 cm_listens_created;
35372 u32 cm_listens_destroyed;
35373 u32 cm_backlog_drops;
35374 -atomic_t cm_loopbacks;
35375 -atomic_t cm_nodes_created;
35376 -atomic_t cm_nodes_destroyed;
35377 -atomic_t cm_accel_dropped_pkts;
35378 -atomic_t cm_resets_recvd;
35379 +atomic_unchecked_t cm_loopbacks;
35380 +atomic_unchecked_t cm_nodes_created;
35381 +atomic_unchecked_t cm_nodes_destroyed;
35382 +atomic_unchecked_t cm_accel_dropped_pkts;
35383 +atomic_unchecked_t cm_resets_recvd;
35384
35385 static inline int mini_cm_accelerated(struct nes_cm_core *,
35386 struct nes_cm_node *);
35387 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35388
35389 static struct nes_cm_core *g_cm_core;
35390
35391 -atomic_t cm_connects;
35392 -atomic_t cm_accepts;
35393 -atomic_t cm_disconnects;
35394 -atomic_t cm_closes;
35395 -atomic_t cm_connecteds;
35396 -atomic_t cm_connect_reqs;
35397 -atomic_t cm_rejects;
35398 +atomic_unchecked_t cm_connects;
35399 +atomic_unchecked_t cm_accepts;
35400 +atomic_unchecked_t cm_disconnects;
35401 +atomic_unchecked_t cm_closes;
35402 +atomic_unchecked_t cm_connecteds;
35403 +atomic_unchecked_t cm_connect_reqs;
35404 +atomic_unchecked_t cm_rejects;
35405
35406
35407 /**
35408 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35409 cm_node->rem_mac);
35410
35411 add_hte_node(cm_core, cm_node);
35412 - atomic_inc(&cm_nodes_created);
35413 + atomic_inc_unchecked(&cm_nodes_created);
35414
35415 return cm_node;
35416 }
35417 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35418 }
35419
35420 atomic_dec(&cm_core->node_cnt);
35421 - atomic_inc(&cm_nodes_destroyed);
35422 + atomic_inc_unchecked(&cm_nodes_destroyed);
35423 nesqp = cm_node->nesqp;
35424 if (nesqp) {
35425 nesqp->cm_node = NULL;
35426 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35427
35428 static void drop_packet(struct sk_buff *skb)
35429 {
35430 - atomic_inc(&cm_accel_dropped_pkts);
35431 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35432 dev_kfree_skb_any(skb);
35433 }
35434
35435 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35436
35437 int reset = 0; /* whether to send reset in case of err.. */
35438 int passive_state;
35439 - atomic_inc(&cm_resets_recvd);
35440 + atomic_inc_unchecked(&cm_resets_recvd);
35441 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35442 " refcnt=%d\n", cm_node, cm_node->state,
35443 atomic_read(&cm_node->ref_count));
35444 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35445 rem_ref_cm_node(cm_node->cm_core, cm_node);
35446 return NULL;
35447 }
35448 - atomic_inc(&cm_loopbacks);
35449 + atomic_inc_unchecked(&cm_loopbacks);
35450 loopbackremotenode->loopbackpartner = cm_node;
35451 loopbackremotenode->tcp_cntxt.rcv_wscale =
35452 NES_CM_DEFAULT_RCV_WND_SCALE;
35453 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35454 add_ref_cm_node(cm_node);
35455 } else if (cm_node->state == NES_CM_STATE_TSA) {
35456 rem_ref_cm_node(cm_core, cm_node);
35457 - atomic_inc(&cm_accel_dropped_pkts);
35458 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35459 dev_kfree_skb_any(skb);
35460 break;
35461 }
35462 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35463
35464 if ((cm_id) && (cm_id->event_handler)) {
35465 if (issue_disconn) {
35466 - atomic_inc(&cm_disconnects);
35467 + atomic_inc_unchecked(&cm_disconnects);
35468 cm_event.event = IW_CM_EVENT_DISCONNECT;
35469 cm_event.status = disconn_status;
35470 cm_event.local_addr = cm_id->local_addr;
35471 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35472 }
35473
35474 if (issue_close) {
35475 - atomic_inc(&cm_closes);
35476 + atomic_inc_unchecked(&cm_closes);
35477 nes_disconnect(nesqp, 1);
35478
35479 cm_id->provider_data = nesqp;
35480 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35481
35482 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35483 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35484 - atomic_inc(&cm_accepts);
35485 + atomic_inc_unchecked(&cm_accepts);
35486
35487 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35488 atomic_read(&nesvnic->netdev->refcnt));
35489 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35490
35491 struct nes_cm_core *cm_core;
35492
35493 - atomic_inc(&cm_rejects);
35494 + atomic_inc_unchecked(&cm_rejects);
35495 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35496 loopback = cm_node->loopbackpartner;
35497 cm_core = cm_node->cm_core;
35498 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35499 ntohl(cm_id->local_addr.sin_addr.s_addr),
35500 ntohs(cm_id->local_addr.sin_port));
35501
35502 - atomic_inc(&cm_connects);
35503 + atomic_inc_unchecked(&cm_connects);
35504 nesqp->active_conn = 1;
35505
35506 /* cache the cm_id in the qp */
35507 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35508 if (nesqp->destroyed) {
35509 return;
35510 }
35511 - atomic_inc(&cm_connecteds);
35512 + atomic_inc_unchecked(&cm_connecteds);
35513 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35514 " local port 0x%04X. jiffies = %lu.\n",
35515 nesqp->hwqp.qp_id,
35516 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35517
35518 ret = cm_id->event_handler(cm_id, &cm_event);
35519 cm_id->add_ref(cm_id);
35520 - atomic_inc(&cm_closes);
35521 + atomic_inc_unchecked(&cm_closes);
35522 cm_event.event = IW_CM_EVENT_CLOSE;
35523 cm_event.status = IW_CM_EVENT_STATUS_OK;
35524 cm_event.provider_data = cm_id->provider_data;
35525 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35526 return;
35527 cm_id = cm_node->cm_id;
35528
35529 - atomic_inc(&cm_connect_reqs);
35530 + atomic_inc_unchecked(&cm_connect_reqs);
35531 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35532 cm_node, cm_id, jiffies);
35533
35534 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35535 return;
35536 cm_id = cm_node->cm_id;
35537
35538 - atomic_inc(&cm_connect_reqs);
35539 + atomic_inc_unchecked(&cm_connect_reqs);
35540 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35541 cm_node, cm_id, jiffies);
35542
35543 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35544 index e593af3..870694a 100644
35545 --- a/drivers/infiniband/hw/nes/nes_nic.c
35546 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35547 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35548 target_stat_values[++index] = mh_detected;
35549 target_stat_values[++index] = mh_pauses_sent;
35550 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35551 - target_stat_values[++index] = atomic_read(&cm_connects);
35552 - target_stat_values[++index] = atomic_read(&cm_accepts);
35553 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35554 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35555 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35556 - target_stat_values[++index] = atomic_read(&cm_rejects);
35557 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35558 - target_stat_values[++index] = atomic_read(&qps_created);
35559 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35560 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35561 - target_stat_values[++index] = atomic_read(&cm_closes);
35562 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35563 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35564 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35565 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35566 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35567 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35568 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35569 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35570 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35571 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35572 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35573 target_stat_values[++index] = cm_packets_sent;
35574 target_stat_values[++index] = cm_packets_bounced;
35575 target_stat_values[++index] = cm_packets_created;
35576 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35577 target_stat_values[++index] = cm_listens_created;
35578 target_stat_values[++index] = cm_listens_destroyed;
35579 target_stat_values[++index] = cm_backlog_drops;
35580 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35581 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35582 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35583 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35584 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35585 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35586 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35587 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35588 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35589 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35590 target_stat_values[++index] = int_mod_timer_init;
35591 target_stat_values[++index] = int_mod_cq_depth_1;
35592 target_stat_values[++index] = int_mod_cq_depth_4;
35593 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35594 index a680c42..f914deb 100644
35595 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35596 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35597 @@ -45,9 +45,9 @@
35598
35599 #include <rdma/ib_umem.h>
35600
35601 -atomic_t mod_qp_timouts;
35602 -atomic_t qps_created;
35603 -atomic_t sw_qps_destroyed;
35604 +atomic_unchecked_t mod_qp_timouts;
35605 +atomic_unchecked_t qps_created;
35606 +atomic_unchecked_t sw_qps_destroyed;
35607
35608 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35609
35610 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35611 if (init_attr->create_flags)
35612 return ERR_PTR(-EINVAL);
35613
35614 - atomic_inc(&qps_created);
35615 + atomic_inc_unchecked(&qps_created);
35616 switch (init_attr->qp_type) {
35617 case IB_QPT_RC:
35618 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35619 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35620 struct iw_cm_event cm_event;
35621 int ret;
35622
35623 - atomic_inc(&sw_qps_destroyed);
35624 + atomic_inc_unchecked(&sw_qps_destroyed);
35625 nesqp->destroyed = 1;
35626
35627 /* Blow away the connection if it exists. */
35628 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35629 index ac11be0..3883c04 100644
35630 --- a/drivers/input/gameport/gameport.c
35631 +++ b/drivers/input/gameport/gameport.c
35632 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35633 */
35634 static void gameport_init_port(struct gameport *gameport)
35635 {
35636 - static atomic_t gameport_no = ATOMIC_INIT(0);
35637 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35638
35639 __module_get(THIS_MODULE);
35640
35641 mutex_init(&gameport->drv_mutex);
35642 device_initialize(&gameport->dev);
35643 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35644 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35645 gameport->dev.bus = &gameport_bus;
35646 gameport->dev.release = gameport_release_port;
35647 if (gameport->parent)
35648 diff --git a/drivers/input/input.c b/drivers/input/input.c
35649 index c82ae82..8cfb9cb 100644
35650 --- a/drivers/input/input.c
35651 +++ b/drivers/input/input.c
35652 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35653 */
35654 int input_register_device(struct input_dev *dev)
35655 {
35656 - static atomic_t input_no = ATOMIC_INIT(0);
35657 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35658 struct input_handler *handler;
35659 const char *path;
35660 int error;
35661 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35662 dev->setkeycode = input_default_setkeycode;
35663
35664 dev_set_name(&dev->dev, "input%ld",
35665 - (unsigned long) atomic_inc_return(&input_no) - 1);
35666 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35667
35668 error = device_add(&dev->dev);
35669 if (error)
35670 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35671 index ca13a6b..b032b0c 100644
35672 --- a/drivers/input/joystick/sidewinder.c
35673 +++ b/drivers/input/joystick/sidewinder.c
35674 @@ -30,6 +30,7 @@
35675 #include <linux/kernel.h>
35676 #include <linux/module.h>
35677 #include <linux/slab.h>
35678 +#include <linux/sched.h>
35679 #include <linux/init.h>
35680 #include <linux/input.h>
35681 #include <linux/gameport.h>
35682 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35683 unsigned char buf[SW_LENGTH];
35684 int i;
35685
35686 + pax_track_stack();
35687 +
35688 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35689
35690 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35691 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35692 index 79e3edc..01412b9 100644
35693 --- a/drivers/input/joystick/xpad.c
35694 +++ b/drivers/input/joystick/xpad.c
35695 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35696
35697 static int xpad_led_probe(struct usb_xpad *xpad)
35698 {
35699 - static atomic_t led_seq = ATOMIC_INIT(0);
35700 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35701 long led_no;
35702 struct xpad_led *led;
35703 struct led_classdev *led_cdev;
35704 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35705 if (!led)
35706 return -ENOMEM;
35707
35708 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35709 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35710
35711 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35712 led->xpad = xpad;
35713 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35714 index 0236f0d..c7327f1 100644
35715 --- a/drivers/input/serio/serio.c
35716 +++ b/drivers/input/serio/serio.c
35717 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35718 */
35719 static void serio_init_port(struct serio *serio)
35720 {
35721 - static atomic_t serio_no = ATOMIC_INIT(0);
35722 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35723
35724 __module_get(THIS_MODULE);
35725
35726 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35727 mutex_init(&serio->drv_mutex);
35728 device_initialize(&serio->dev);
35729 dev_set_name(&serio->dev, "serio%ld",
35730 - (long)atomic_inc_return(&serio_no) - 1);
35731 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35732 serio->dev.bus = &serio_bus;
35733 serio->dev.release = serio_release_port;
35734 if (serio->parent) {
35735 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35736 index 33dcd8d..2783d25 100644
35737 --- a/drivers/isdn/gigaset/common.c
35738 +++ b/drivers/isdn/gigaset/common.c
35739 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35740 cs->commands_pending = 0;
35741 cs->cur_at_seq = 0;
35742 cs->gotfwver = -1;
35743 - cs->open_count = 0;
35744 + local_set(&cs->open_count, 0);
35745 cs->dev = NULL;
35746 cs->tty = NULL;
35747 cs->tty_dev = NULL;
35748 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35749 index a2f6125..6a70677 100644
35750 --- a/drivers/isdn/gigaset/gigaset.h
35751 +++ b/drivers/isdn/gigaset/gigaset.h
35752 @@ -34,6 +34,7 @@
35753 #include <linux/tty_driver.h>
35754 #include <linux/list.h>
35755 #include <asm/atomic.h>
35756 +#include <asm/local.h>
35757
35758 #define GIG_VERSION {0,5,0,0}
35759 #define GIG_COMPAT {0,4,0,0}
35760 @@ -446,7 +447,7 @@ struct cardstate {
35761 spinlock_t cmdlock;
35762 unsigned curlen, cmdbytes;
35763
35764 - unsigned open_count;
35765 + local_t open_count;
35766 struct tty_struct *tty;
35767 struct tasklet_struct if_wake_tasklet;
35768 unsigned control_state;
35769 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35770 index b3065b8..c7e8cc9 100644
35771 --- a/drivers/isdn/gigaset/interface.c
35772 +++ b/drivers/isdn/gigaset/interface.c
35773 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35774 return -ERESTARTSYS; // FIXME -EINTR?
35775 tty->driver_data = cs;
35776
35777 - ++cs->open_count;
35778 -
35779 - if (cs->open_count == 1) {
35780 + if (local_inc_return(&cs->open_count) == 1) {
35781 spin_lock_irqsave(&cs->lock, flags);
35782 cs->tty = tty;
35783 spin_unlock_irqrestore(&cs->lock, flags);
35784 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35785
35786 if (!cs->connected)
35787 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35788 - else if (!cs->open_count)
35789 + else if (!local_read(&cs->open_count))
35790 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35791 else {
35792 - if (!--cs->open_count) {
35793 + if (!local_dec_return(&cs->open_count)) {
35794 spin_lock_irqsave(&cs->lock, flags);
35795 cs->tty = NULL;
35796 spin_unlock_irqrestore(&cs->lock, flags);
35797 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35798 if (!cs->connected) {
35799 gig_dbg(DEBUG_IF, "not connected");
35800 retval = -ENODEV;
35801 - } else if (!cs->open_count)
35802 + } else if (!local_read(&cs->open_count))
35803 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35804 else {
35805 retval = 0;
35806 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35807 if (!cs->connected) {
35808 gig_dbg(DEBUG_IF, "not connected");
35809 retval = -ENODEV;
35810 - } else if (!cs->open_count)
35811 + } else if (!local_read(&cs->open_count))
35812 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35813 else if (cs->mstate != MS_LOCKED) {
35814 dev_warn(cs->dev, "can't write to unlocked device\n");
35815 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35816 if (!cs->connected) {
35817 gig_dbg(DEBUG_IF, "not connected");
35818 retval = -ENODEV;
35819 - } else if (!cs->open_count)
35820 + } else if (!local_read(&cs->open_count))
35821 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35822 else if (cs->mstate != MS_LOCKED) {
35823 dev_warn(cs->dev, "can't write to unlocked device\n");
35824 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35825
35826 if (!cs->connected)
35827 gig_dbg(DEBUG_IF, "not connected");
35828 - else if (!cs->open_count)
35829 + else if (!local_read(&cs->open_count))
35830 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35831 else if (cs->mstate != MS_LOCKED)
35832 dev_warn(cs->dev, "can't write to unlocked device\n");
35833 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35834
35835 if (!cs->connected)
35836 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35837 - else if (!cs->open_count)
35838 + else if (!local_read(&cs->open_count))
35839 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35840 else {
35841 //FIXME
35842 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35843
35844 if (!cs->connected)
35845 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35846 - else if (!cs->open_count)
35847 + else if (!local_read(&cs->open_count))
35848 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35849 else {
35850 //FIXME
35851 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35852 goto out;
35853 }
35854
35855 - if (!cs->open_count) {
35856 + if (!local_read(&cs->open_count)) {
35857 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35858 goto out;
35859 }
35860 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35861 index a7c0083..62a7cb6 100644
35862 --- a/drivers/isdn/hardware/avm/b1.c
35863 +++ b/drivers/isdn/hardware/avm/b1.c
35864 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35865 }
35866 if (left) {
35867 if (t4file->user) {
35868 - if (copy_from_user(buf, dp, left))
35869 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35870 return -EFAULT;
35871 } else {
35872 memcpy(buf, dp, left);
35873 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35874 }
35875 if (left) {
35876 if (config->user) {
35877 - if (copy_from_user(buf, dp, left))
35878 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35879 return -EFAULT;
35880 } else {
35881 memcpy(buf, dp, left);
35882 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35883 index f130724..c373c68 100644
35884 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35885 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35886 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35887 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35888 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35889
35890 + pax_track_stack();
35891
35892 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35893 {
35894 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35895 index 4d425c6..a9be6c4 100644
35896 --- a/drivers/isdn/hardware/eicon/capifunc.c
35897 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35898 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35899 IDI_SYNC_REQ req;
35900 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35901
35902 + pax_track_stack();
35903 +
35904 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35905
35906 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35907 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35908 index 3029234..ef0d9e2 100644
35909 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35910 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35911 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35912 IDI_SYNC_REQ req;
35913 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35914
35915 + pax_track_stack();
35916 +
35917 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35918
35919 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35920 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35921 index d36a4c0..11e7d1a 100644
35922 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35923 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35924 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35925 IDI_SYNC_REQ req;
35926 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35927
35928 + pax_track_stack();
35929 +
35930 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35931
35932 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35933 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35934 index 85784a7..a19ca98 100644
35935 --- a/drivers/isdn/hardware/eicon/divasync.h
35936 +++ b/drivers/isdn/hardware/eicon/divasync.h
35937 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35938 } diva_didd_add_adapter_t;
35939 typedef struct _diva_didd_remove_adapter {
35940 IDI_CALL p_request;
35941 -} diva_didd_remove_adapter_t;
35942 +} __no_const diva_didd_remove_adapter_t;
35943 typedef struct _diva_didd_read_adapter_array {
35944 void * buffer;
35945 dword length;
35946 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35947 index db87d51..7d09acf 100644
35948 --- a/drivers/isdn/hardware/eicon/idifunc.c
35949 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35950 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35951 IDI_SYNC_REQ req;
35952 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35953
35954 + pax_track_stack();
35955 +
35956 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35957
35958 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35959 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35960 index ae89fb8..0fab299 100644
35961 --- a/drivers/isdn/hardware/eicon/message.c
35962 +++ b/drivers/isdn/hardware/eicon/message.c
35963 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35964 dword d;
35965 word w;
35966
35967 + pax_track_stack();
35968 +
35969 a = plci->adapter;
35970 Id = ((word)plci->Id<<8)|a->Id;
35971 PUT_WORD(&SS_Ind[4],0x0000);
35972 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35973 word j, n, w;
35974 dword d;
35975
35976 + pax_track_stack();
35977 +
35978
35979 for(i=0;i<8;i++) bp_parms[i].length = 0;
35980 for(i=0;i<2;i++) global_config[i].length = 0;
35981 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35982 const byte llc3[] = {4,3,2,2,6,6,0};
35983 const byte header[] = {0,2,3,3,0,0,0};
35984
35985 + pax_track_stack();
35986 +
35987 for(i=0;i<8;i++) bp_parms[i].length = 0;
35988 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35989 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35990 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35991 word appl_number_group_type[MAX_APPL];
35992 PLCI *auxplci;
35993
35994 + pax_track_stack();
35995 +
35996 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35997
35998 if(!a->group_optimization_enabled)
35999 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
36000 index a564b75..f3cf8b5 100644
36001 --- a/drivers/isdn/hardware/eicon/mntfunc.c
36002 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
36003 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36004 IDI_SYNC_REQ req;
36005 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36006
36007 + pax_track_stack();
36008 +
36009 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36010
36011 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36012 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
36013 index a3bd163..8956575 100644
36014 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
36015 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
36016 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
36017 typedef struct _diva_os_idi_adapter_interface {
36018 diva_init_card_proc_t cleanup_adapter_proc;
36019 diva_cmd_card_proc_t cmd_proc;
36020 -} diva_os_idi_adapter_interface_t;
36021 +} __no_const diva_os_idi_adapter_interface_t;
36022
36023 typedef struct _diva_os_xdi_adapter {
36024 struct list_head link;
36025 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
36026 index adb1e8c..21b590b 100644
36027 --- a/drivers/isdn/i4l/isdn_common.c
36028 +++ b/drivers/isdn/i4l/isdn_common.c
36029 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
36030 } iocpar;
36031 void __user *argp = (void __user *)arg;
36032
36033 + pax_track_stack();
36034 +
36035 #define name iocpar.name
36036 #define bname iocpar.bname
36037 #define iocts iocpar.iocts
36038 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
36039 index 90b56ed..5ed3305 100644
36040 --- a/drivers/isdn/i4l/isdn_net.c
36041 +++ b/drivers/isdn/i4l/isdn_net.c
36042 @@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
36043 {
36044 isdn_net_local *lp = netdev_priv(dev);
36045 unsigned char *p;
36046 - ushort len = 0;
36047 + int len = 0;
36048
36049 switch (lp->p_encap) {
36050 case ISDN_NET_ENCAP_ETHER:
36051 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
36052 index bf7997a..cf091db 100644
36053 --- a/drivers/isdn/icn/icn.c
36054 +++ b/drivers/isdn/icn/icn.c
36055 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
36056 if (count > len)
36057 count = len;
36058 if (user) {
36059 - if (copy_from_user(msg, buf, count))
36060 + if (count > sizeof msg || copy_from_user(msg, buf, count))
36061 return -EFAULT;
36062 } else
36063 memcpy(msg, buf, count);
36064 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36065 index feb0fa4..f76f830 100644
36066 --- a/drivers/isdn/mISDN/socket.c
36067 +++ b/drivers/isdn/mISDN/socket.c
36068 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36069 if (dev) {
36070 struct mISDN_devinfo di;
36071
36072 + memset(&di, 0, sizeof(di));
36073 di.id = dev->id;
36074 di.Dprotocols = dev->Dprotocols;
36075 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36076 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36077 if (dev) {
36078 struct mISDN_devinfo di;
36079
36080 + memset(&di, 0, sizeof(di));
36081 di.id = dev->id;
36082 di.Dprotocols = dev->Dprotocols;
36083 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36084 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36085 index 485be8b..f0225bc 100644
36086 --- a/drivers/isdn/sc/interrupt.c
36087 +++ b/drivers/isdn/sc/interrupt.c
36088 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36089 }
36090 else if(callid>=0x0000 && callid<=0x7FFF)
36091 {
36092 + int len;
36093 +
36094 pr_debug("%s: Got Incoming Call\n",
36095 sc_adapter[card]->devicename);
36096 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36097 - strcpy(setup.eazmsn,
36098 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36099 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36100 + sizeof(setup.phone));
36101 + if (len >= sizeof(setup.phone))
36102 + continue;
36103 + len = strlcpy(setup.eazmsn,
36104 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36105 + sizeof(setup.eazmsn));
36106 + if (len >= sizeof(setup.eazmsn))
36107 + continue;
36108 setup.si1 = 7;
36109 setup.si2 = 0;
36110 setup.plan = 0;
36111 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36112 * Handle a GetMyNumber Rsp
36113 */
36114 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36115 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36116 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36117 + rcvmsg.msg_data.byte_array,
36118 + sizeof(rcvmsg.msg_data.byte_array));
36119 continue;
36120 }
36121
36122 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36123 index 8744d24..d1f9a9a 100644
36124 --- a/drivers/lguest/core.c
36125 +++ b/drivers/lguest/core.c
36126 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
36127 * it's worked so far. The end address needs +1 because __get_vm_area
36128 * allocates an extra guard page, so we need space for that.
36129 */
36130 +
36131 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36132 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36133 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36134 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36135 +#else
36136 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36137 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36138 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36139 +#endif
36140 +
36141 if (!switcher_vma) {
36142 err = -ENOMEM;
36143 printk("lguest: could not map switcher pages high\n");
36144 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
36145 * Now the Switcher is mapped at the right address, we can't fail!
36146 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36147 */
36148 - memcpy(switcher_vma->addr, start_switcher_text,
36149 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36150 end_switcher_text - start_switcher_text);
36151
36152 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36153 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36154 index 6ae3888..8b38145 100644
36155 --- a/drivers/lguest/x86/core.c
36156 +++ b/drivers/lguest/x86/core.c
36157 @@ -59,7 +59,7 @@ static struct {
36158 /* Offset from where switcher.S was compiled to where we've copied it */
36159 static unsigned long switcher_offset(void)
36160 {
36161 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36162 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36163 }
36164
36165 /* This cpu's struct lguest_pages. */
36166 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36167 * These copies are pretty cheap, so we do them unconditionally: */
36168 /* Save the current Host top-level page directory.
36169 */
36170 +
36171 +#ifdef CONFIG_PAX_PER_CPU_PGD
36172 + pages->state.host_cr3 = read_cr3();
36173 +#else
36174 pages->state.host_cr3 = __pa(current->mm->pgd);
36175 +#endif
36176 +
36177 /*
36178 * Set up the Guest's page tables to see this CPU's pages (and no
36179 * other CPU's pages).
36180 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36181 * compiled-in switcher code and the high-mapped copy we just made.
36182 */
36183 for (i = 0; i < IDT_ENTRIES; i++)
36184 - default_idt_entries[i] += switcher_offset();
36185 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36186
36187 /*
36188 * Set up the Switcher's per-cpu areas.
36189 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36190 * it will be undisturbed when we switch. To change %cs and jump we
36191 * need this structure to feed to Intel's "lcall" instruction.
36192 */
36193 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36194 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36195 lguest_entry.segment = LGUEST_CS;
36196
36197 /*
36198 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36199 index 40634b0..4f5855e 100644
36200 --- a/drivers/lguest/x86/switcher_32.S
36201 +++ b/drivers/lguest/x86/switcher_32.S
36202 @@ -87,6 +87,7 @@
36203 #include <asm/page.h>
36204 #include <asm/segment.h>
36205 #include <asm/lguest.h>
36206 +#include <asm/processor-flags.h>
36207
36208 // We mark the start of the code to copy
36209 // It's placed in .text tho it's never run here
36210 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36211 // Changes type when we load it: damn Intel!
36212 // For after we switch over our page tables
36213 // That entry will be read-only: we'd crash.
36214 +
36215 +#ifdef CONFIG_PAX_KERNEXEC
36216 + mov %cr0, %edx
36217 + xor $X86_CR0_WP, %edx
36218 + mov %edx, %cr0
36219 +#endif
36220 +
36221 movl $(GDT_ENTRY_TSS*8), %edx
36222 ltr %dx
36223
36224 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36225 // Let's clear it again for our return.
36226 // The GDT descriptor of the Host
36227 // Points to the table after two "size" bytes
36228 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36229 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36230 // Clear "used" from type field (byte 5, bit 2)
36231 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36232 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36233 +
36234 +#ifdef CONFIG_PAX_KERNEXEC
36235 + mov %cr0, %eax
36236 + xor $X86_CR0_WP, %eax
36237 + mov %eax, %cr0
36238 +#endif
36239
36240 // Once our page table's switched, the Guest is live!
36241 // The Host fades as we run this final step.
36242 @@ -295,13 +309,12 @@ deliver_to_host:
36243 // I consulted gcc, and it gave
36244 // These instructions, which I gladly credit:
36245 leal (%edx,%ebx,8), %eax
36246 - movzwl (%eax),%edx
36247 - movl 4(%eax), %eax
36248 - xorw %ax, %ax
36249 - orl %eax, %edx
36250 + movl 4(%eax), %edx
36251 + movw (%eax), %dx
36252 // Now the address of the handler's in %edx
36253 // We call it now: its "iret" drops us home.
36254 - jmp *%edx
36255 + ljmp $__KERNEL_CS, $1f
36256 +1: jmp *%edx
36257
36258 // Every interrupt can come to us here
36259 // But we must truly tell each apart.
36260 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36261 index 588a5b0..b71db89 100644
36262 --- a/drivers/macintosh/macio_asic.c
36263 +++ b/drivers/macintosh/macio_asic.c
36264 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36265 * MacIO is matched against any Apple ID, it's probe() function
36266 * will then decide wether it applies or not
36267 */
36268 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36269 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36270 .vendor = PCI_VENDOR_ID_APPLE,
36271 .device = PCI_ANY_ID,
36272 .subvendor = PCI_ANY_ID,
36273 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36274 index a348bb0..ecd9b3f 100644
36275 --- a/drivers/macintosh/via-pmu-backlight.c
36276 +++ b/drivers/macintosh/via-pmu-backlight.c
36277 @@ -15,7 +15,7 @@
36278
36279 #define MAX_PMU_LEVEL 0xFF
36280
36281 -static struct backlight_ops pmu_backlight_data;
36282 +static const struct backlight_ops pmu_backlight_data;
36283 static DEFINE_SPINLOCK(pmu_backlight_lock);
36284 static int sleeping, uses_pmu_bl;
36285 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36286 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36287 return bd->props.brightness;
36288 }
36289
36290 -static struct backlight_ops pmu_backlight_data = {
36291 +static const struct backlight_ops pmu_backlight_data = {
36292 .get_brightness = pmu_backlight_get_brightness,
36293 .update_status = pmu_backlight_update_status,
36294
36295 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36296 index 6f308a4..b5f7ff7 100644
36297 --- a/drivers/macintosh/via-pmu.c
36298 +++ b/drivers/macintosh/via-pmu.c
36299 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36300 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36301 }
36302
36303 -static struct platform_suspend_ops pmu_pm_ops = {
36304 +static const struct platform_suspend_ops pmu_pm_ops = {
36305 .enter = powerbook_sleep,
36306 .valid = pmu_sleep_valid,
36307 };
36308 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36309 index 818b617..4656e38 100644
36310 --- a/drivers/md/dm-ioctl.c
36311 +++ b/drivers/md/dm-ioctl.c
36312 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36313 cmd == DM_LIST_VERSIONS_CMD)
36314 return 0;
36315
36316 - if ((cmd == DM_DEV_CREATE_CMD)) {
36317 + if (cmd == DM_DEV_CREATE_CMD) {
36318 if (!*param->name) {
36319 DMWARN("name not supplied when creating device");
36320 return -EINVAL;
36321 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36322 index 6021d0a..a878643 100644
36323 --- a/drivers/md/dm-raid1.c
36324 +++ b/drivers/md/dm-raid1.c
36325 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36326
36327 struct mirror {
36328 struct mirror_set *ms;
36329 - atomic_t error_count;
36330 + atomic_unchecked_t error_count;
36331 unsigned long error_type;
36332 struct dm_dev *dev;
36333 sector_t offset;
36334 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36335 * simple way to tell if a device has encountered
36336 * errors.
36337 */
36338 - atomic_inc(&m->error_count);
36339 + atomic_inc_unchecked(&m->error_count);
36340
36341 if (test_and_set_bit(error_type, &m->error_type))
36342 return;
36343 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36344 }
36345
36346 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36347 - if (!atomic_read(&new->error_count)) {
36348 + if (!atomic_read_unchecked(&new->error_count)) {
36349 set_default_mirror(new);
36350 break;
36351 }
36352 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36353 struct mirror *m = get_default_mirror(ms);
36354
36355 do {
36356 - if (likely(!atomic_read(&m->error_count)))
36357 + if (likely(!atomic_read_unchecked(&m->error_count)))
36358 return m;
36359
36360 if (m-- == ms->mirror)
36361 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36362 {
36363 struct mirror *default_mirror = get_default_mirror(m->ms);
36364
36365 - return !atomic_read(&default_mirror->error_count);
36366 + return !atomic_read_unchecked(&default_mirror->error_count);
36367 }
36368
36369 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36370 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36371 */
36372 if (likely(region_in_sync(ms, region, 1)))
36373 m = choose_mirror(ms, bio->bi_sector);
36374 - else if (m && atomic_read(&m->error_count))
36375 + else if (m && atomic_read_unchecked(&m->error_count))
36376 m = NULL;
36377
36378 if (likely(m))
36379 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36380 }
36381
36382 ms->mirror[mirror].ms = ms;
36383 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36384 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36385 ms->mirror[mirror].error_type = 0;
36386 ms->mirror[mirror].offset = offset;
36387
36388 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36389 */
36390 static char device_status_char(struct mirror *m)
36391 {
36392 - if (!atomic_read(&(m->error_count)))
36393 + if (!atomic_read_unchecked(&(m->error_count)))
36394 return 'A';
36395
36396 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36397 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36398 index bd58703..9f26571 100644
36399 --- a/drivers/md/dm-stripe.c
36400 +++ b/drivers/md/dm-stripe.c
36401 @@ -20,7 +20,7 @@ struct stripe {
36402 struct dm_dev *dev;
36403 sector_t physical_start;
36404
36405 - atomic_t error_count;
36406 + atomic_unchecked_t error_count;
36407 };
36408
36409 struct stripe_c {
36410 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36411 kfree(sc);
36412 return r;
36413 }
36414 - atomic_set(&(sc->stripe[i].error_count), 0);
36415 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36416 }
36417
36418 ti->private = sc;
36419 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36420 DMEMIT("%d ", sc->stripes);
36421 for (i = 0; i < sc->stripes; i++) {
36422 DMEMIT("%s ", sc->stripe[i].dev->name);
36423 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36424 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36425 'D' : 'A';
36426 }
36427 buffer[i] = '\0';
36428 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36429 */
36430 for (i = 0; i < sc->stripes; i++)
36431 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36432 - atomic_inc(&(sc->stripe[i].error_count));
36433 - if (atomic_read(&(sc->stripe[i].error_count)) <
36434 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36435 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36436 DM_IO_ERROR_THRESHOLD)
36437 queue_work(kstriped, &sc->kstriped_ws);
36438 }
36439 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36440 index 4b04590..13a77b2 100644
36441 --- a/drivers/md/dm-sysfs.c
36442 +++ b/drivers/md/dm-sysfs.c
36443 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36444 NULL,
36445 };
36446
36447 -static struct sysfs_ops dm_sysfs_ops = {
36448 +static const struct sysfs_ops dm_sysfs_ops = {
36449 .show = dm_attr_show,
36450 };
36451
36452 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36453 index 03345bb..332250d 100644
36454 --- a/drivers/md/dm-table.c
36455 +++ b/drivers/md/dm-table.c
36456 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36457 if (!dev_size)
36458 return 0;
36459
36460 - if ((start >= dev_size) || (start + len > dev_size)) {
36461 + if ((start >= dev_size) || (len > dev_size - start)) {
36462 DMWARN("%s: %s too small for target: "
36463 "start=%llu, len=%llu, dev_size=%llu",
36464 dm_device_name(ti->table->md), bdevname(bdev, b),
36465 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36466 index c988ac2..c418141 100644
36467 --- a/drivers/md/dm.c
36468 +++ b/drivers/md/dm.c
36469 @@ -165,9 +165,9 @@ struct mapped_device {
36470 /*
36471 * Event handling.
36472 */
36473 - atomic_t event_nr;
36474 + atomic_unchecked_t event_nr;
36475 wait_queue_head_t eventq;
36476 - atomic_t uevent_seq;
36477 + atomic_unchecked_t uevent_seq;
36478 struct list_head uevent_list;
36479 spinlock_t uevent_lock; /* Protect access to uevent_list */
36480
36481 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36482 rwlock_init(&md->map_lock);
36483 atomic_set(&md->holders, 1);
36484 atomic_set(&md->open_count, 0);
36485 - atomic_set(&md->event_nr, 0);
36486 - atomic_set(&md->uevent_seq, 0);
36487 + atomic_set_unchecked(&md->event_nr, 0);
36488 + atomic_set_unchecked(&md->uevent_seq, 0);
36489 INIT_LIST_HEAD(&md->uevent_list);
36490 spin_lock_init(&md->uevent_lock);
36491
36492 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36493
36494 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36495
36496 - atomic_inc(&md->event_nr);
36497 + atomic_inc_unchecked(&md->event_nr);
36498 wake_up(&md->eventq);
36499 }
36500
36501 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36502
36503 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36504 {
36505 - return atomic_add_return(1, &md->uevent_seq);
36506 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36507 }
36508
36509 uint32_t dm_get_event_nr(struct mapped_device *md)
36510 {
36511 - return atomic_read(&md->event_nr);
36512 + return atomic_read_unchecked(&md->event_nr);
36513 }
36514
36515 int dm_wait_event(struct mapped_device *md, int event_nr)
36516 {
36517 return wait_event_interruptible(md->eventq,
36518 - (event_nr != atomic_read(&md->event_nr)));
36519 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36520 }
36521
36522 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36523 diff --git a/drivers/md/md.c b/drivers/md/md.c
36524 index 4ce6e2f..7a9530a 100644
36525 --- a/drivers/md/md.c
36526 +++ b/drivers/md/md.c
36527 @@ -153,10 +153,10 @@ static int start_readonly;
36528 * start build, activate spare
36529 */
36530 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36531 -static atomic_t md_event_count;
36532 +static atomic_unchecked_t md_event_count;
36533 void md_new_event(mddev_t *mddev)
36534 {
36535 - atomic_inc(&md_event_count);
36536 + atomic_inc_unchecked(&md_event_count);
36537 wake_up(&md_event_waiters);
36538 }
36539 EXPORT_SYMBOL_GPL(md_new_event);
36540 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36541 */
36542 static void md_new_event_inintr(mddev_t *mddev)
36543 {
36544 - atomic_inc(&md_event_count);
36545 + atomic_inc_unchecked(&md_event_count);
36546 wake_up(&md_event_waiters);
36547 }
36548
36549 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36550
36551 rdev->preferred_minor = 0xffff;
36552 rdev->data_offset = le64_to_cpu(sb->data_offset);
36553 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36554 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36555
36556 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36557 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36558 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36559 else
36560 sb->resync_offset = cpu_to_le64(0);
36561
36562 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36563 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36564
36565 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36566 sb->size = cpu_to_le64(mddev->dev_sectors);
36567 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36568 static ssize_t
36569 errors_show(mdk_rdev_t *rdev, char *page)
36570 {
36571 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36572 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36573 }
36574
36575 static ssize_t
36576 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36577 char *e;
36578 unsigned long n = simple_strtoul(buf, &e, 10);
36579 if (*buf && (*e == 0 || *e == '\n')) {
36580 - atomic_set(&rdev->corrected_errors, n);
36581 + atomic_set_unchecked(&rdev->corrected_errors, n);
36582 return len;
36583 }
36584 return -EINVAL;
36585 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36586 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36587 kfree(rdev);
36588 }
36589 -static struct sysfs_ops rdev_sysfs_ops = {
36590 +static const struct sysfs_ops rdev_sysfs_ops = {
36591 .show = rdev_attr_show,
36592 .store = rdev_attr_store,
36593 };
36594 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36595 rdev->data_offset = 0;
36596 rdev->sb_events = 0;
36597 atomic_set(&rdev->nr_pending, 0);
36598 - atomic_set(&rdev->read_errors, 0);
36599 - atomic_set(&rdev->corrected_errors, 0);
36600 + atomic_set_unchecked(&rdev->read_errors, 0);
36601 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36602
36603 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36604 if (!size) {
36605 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36606 kfree(mddev);
36607 }
36608
36609 -static struct sysfs_ops md_sysfs_ops = {
36610 +static const struct sysfs_ops md_sysfs_ops = {
36611 .show = md_attr_show,
36612 .store = md_attr_store,
36613 };
36614 @@ -4482,7 +4482,8 @@ out:
36615 err = 0;
36616 blk_integrity_unregister(disk);
36617 md_new_event(mddev);
36618 - sysfs_notify_dirent(mddev->sysfs_state);
36619 + if (mddev->sysfs_state)
36620 + sysfs_notify_dirent(mddev->sysfs_state);
36621 return err;
36622 }
36623
36624 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36625
36626 spin_unlock(&pers_lock);
36627 seq_printf(seq, "\n");
36628 - mi->event = atomic_read(&md_event_count);
36629 + mi->event = atomic_read_unchecked(&md_event_count);
36630 return 0;
36631 }
36632 if (v == (void*)2) {
36633 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36634 chunk_kb ? "KB" : "B");
36635 if (bitmap->file) {
36636 seq_printf(seq, ", file: ");
36637 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36638 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36639 }
36640
36641 seq_printf(seq, "\n");
36642 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36643 else {
36644 struct seq_file *p = file->private_data;
36645 p->private = mi;
36646 - mi->event = atomic_read(&md_event_count);
36647 + mi->event = atomic_read_unchecked(&md_event_count);
36648 }
36649 return error;
36650 }
36651 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36652 /* always allow read */
36653 mask = POLLIN | POLLRDNORM;
36654
36655 - if (mi->event != atomic_read(&md_event_count))
36656 + if (mi->event != atomic_read_unchecked(&md_event_count))
36657 mask |= POLLERR | POLLPRI;
36658 return mask;
36659 }
36660 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36661 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36662 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36663 (int)part_stat_read(&disk->part0, sectors[1]) -
36664 - atomic_read(&disk->sync_io);
36665 + atomic_read_unchecked(&disk->sync_io);
36666 /* sync IO will cause sync_io to increase before the disk_stats
36667 * as sync_io is counted when a request starts, and
36668 * disk_stats is counted when it completes.
36669 diff --git a/drivers/md/md.h b/drivers/md/md.h
36670 index 87430fe..0024a4c 100644
36671 --- a/drivers/md/md.h
36672 +++ b/drivers/md/md.h
36673 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36674 * only maintained for arrays that
36675 * support hot removal
36676 */
36677 - atomic_t read_errors; /* number of consecutive read errors that
36678 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36679 * we have tried to ignore.
36680 */
36681 - atomic_t corrected_errors; /* number of corrected read errors,
36682 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36683 * for reporting to userspace and storing
36684 * in superblock.
36685 */
36686 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36687
36688 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36689 {
36690 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36691 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36692 }
36693
36694 struct mdk_personality
36695 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36696 index 968cb14..f0ad2e4 100644
36697 --- a/drivers/md/raid1.c
36698 +++ b/drivers/md/raid1.c
36699 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36700 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36701 continue;
36702 rdev = conf->mirrors[d].rdev;
36703 - atomic_add(s, &rdev->corrected_errors);
36704 + atomic_add_unchecked(s, &rdev->corrected_errors);
36705 if (sync_page_io(rdev->bdev,
36706 sect + rdev->data_offset,
36707 s<<9,
36708 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36709 /* Well, this device is dead */
36710 md_error(mddev, rdev);
36711 else {
36712 - atomic_add(s, &rdev->corrected_errors);
36713 + atomic_add_unchecked(s, &rdev->corrected_errors);
36714 printk(KERN_INFO
36715 "raid1:%s: read error corrected "
36716 "(%d sectors at %llu on %s)\n",
36717 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36718 index 1b4e232..cf0f534 100644
36719 --- a/drivers/md/raid10.c
36720 +++ b/drivers/md/raid10.c
36721 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36722 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36723 set_bit(R10BIO_Uptodate, &r10_bio->state);
36724 else {
36725 - atomic_add(r10_bio->sectors,
36726 + atomic_add_unchecked(r10_bio->sectors,
36727 &conf->mirrors[d].rdev->corrected_errors);
36728 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36729 md_error(r10_bio->mddev,
36730 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36731 test_bit(In_sync, &rdev->flags)) {
36732 atomic_inc(&rdev->nr_pending);
36733 rcu_read_unlock();
36734 - atomic_add(s, &rdev->corrected_errors);
36735 + atomic_add_unchecked(s, &rdev->corrected_errors);
36736 if (sync_page_io(rdev->bdev,
36737 r10_bio->devs[sl].addr +
36738 sect + rdev->data_offset,
36739 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36740 index 883215d..675bf47 100644
36741 --- a/drivers/md/raid5.c
36742 +++ b/drivers/md/raid5.c
36743 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36744 bi->bi_next = NULL;
36745 if ((rw & WRITE) &&
36746 test_bit(R5_ReWrite, &sh->dev[i].flags))
36747 - atomic_add(STRIPE_SECTORS,
36748 + atomic_add_unchecked(STRIPE_SECTORS,
36749 &rdev->corrected_errors);
36750 generic_make_request(bi);
36751 } else {
36752 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36753 clear_bit(R5_ReadError, &sh->dev[i].flags);
36754 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36755 }
36756 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36757 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36758 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36759 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36760 } else {
36761 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36762 int retry = 0;
36763 rdev = conf->disks[i].rdev;
36764
36765 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36766 - atomic_inc(&rdev->read_errors);
36767 + atomic_inc_unchecked(&rdev->read_errors);
36768 if (conf->mddev->degraded >= conf->max_degraded)
36769 printk_rl(KERN_WARNING
36770 "raid5:%s: read error not correctable "
36771 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36772 (unsigned long long)(sh->sector
36773 + rdev->data_offset),
36774 bdn);
36775 - else if (atomic_read(&rdev->read_errors)
36776 + else if (atomic_read_unchecked(&rdev->read_errors)
36777 > conf->max_nr_stripes)
36778 printk(KERN_WARNING
36779 "raid5:%s: Too many read errors, failing device %s.\n",
36780 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36781 sector_t r_sector;
36782 struct stripe_head sh2;
36783
36784 + pax_track_stack();
36785
36786 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36787 stripe = new_sector;
36788 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36789 index 05bde9c..2f31d40 100644
36790 --- a/drivers/media/common/saa7146_hlp.c
36791 +++ b/drivers/media/common/saa7146_hlp.c
36792 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36793
36794 int x[32], y[32], w[32], h[32];
36795
36796 + pax_track_stack();
36797 +
36798 /* clear out memory */
36799 memset(&line_list[0], 0x00, sizeof(u32)*32);
36800 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36801 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36802 index cb22da5..82b686e 100644
36803 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36804 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36805 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36806 u8 buf[HOST_LINK_BUF_SIZE];
36807 int i;
36808
36809 + pax_track_stack();
36810 +
36811 dprintk("%s\n", __func__);
36812
36813 /* check if we have space for a link buf in the rx_buffer */
36814 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36815 unsigned long timeout;
36816 int written;
36817
36818 + pax_track_stack();
36819 +
36820 dprintk("%s\n", __func__);
36821
36822 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36823 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36824 index 2fe05d0..a3289c4 100644
36825 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36826 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36827 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36828 union {
36829 dmx_ts_cb ts;
36830 dmx_section_cb sec;
36831 - } cb;
36832 + } __no_const cb;
36833
36834 struct dvb_demux *demux;
36835 void *priv;
36836 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36837 index 94159b9..376bd8e 100644
36838 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36839 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36840 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36841 const struct dvb_device *template, void *priv, int type)
36842 {
36843 struct dvb_device *dvbdev;
36844 - struct file_operations *dvbdevfops;
36845 + file_operations_no_const *dvbdevfops;
36846 struct device *clsdev;
36847 int minor;
36848 int id;
36849 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36850 index 2a53dd0..db8c07a 100644
36851 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36852 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36853 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36854 struct dib0700_adapter_state {
36855 int (*set_param_save) (struct dvb_frontend *,
36856 struct dvb_frontend_parameters *);
36857 -};
36858 +} __no_const;
36859
36860 static int dib7070_set_param_override(struct dvb_frontend *fe,
36861 struct dvb_frontend_parameters *fep)
36862 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36863 index db7f7f7..f55e96f 100644
36864 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36865 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36866 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36867
36868 u8 buf[260];
36869
36870 + pax_track_stack();
36871 +
36872 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36873 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36874
36875 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36876 index 524acf5..5ffc403 100644
36877 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36878 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36879 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36880
36881 struct dib0700_adapter_state {
36882 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36883 -};
36884 +} __no_const;
36885
36886 /* Hauppauge Nova-T 500 (aka Bristol)
36887 * has a LNA on GPIO0 which is enabled by setting 1 */
36888 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36889 index ba91735..4261d84 100644
36890 --- a/drivers/media/dvb/frontends/dib3000.h
36891 +++ b/drivers/media/dvb/frontends/dib3000.h
36892 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36893 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36894 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36895 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36896 -};
36897 +} __no_const;
36898
36899 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36900 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36901 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36902 index c709ce6..b3fe620 100644
36903 --- a/drivers/media/dvb/frontends/or51211.c
36904 +++ b/drivers/media/dvb/frontends/or51211.c
36905 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36906 u8 tudata[585];
36907 int i;
36908
36909 + pax_track_stack();
36910 +
36911 dprintk("Firmware is %zd bytes\n",fw->size);
36912
36913 /* Get eprom data */
36914 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36915 index 482d0f3..ee1e202 100644
36916 --- a/drivers/media/radio/radio-cadet.c
36917 +++ b/drivers/media/radio/radio-cadet.c
36918 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36919 while (i < count && dev->rdsin != dev->rdsout)
36920 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36921
36922 - if (copy_to_user(data, readbuf, i))
36923 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36924 return -EFAULT;
36925 return i;
36926 }
36927 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36928 index 6dd51e2..0359b92 100644
36929 --- a/drivers/media/video/cx18/cx18-driver.c
36930 +++ b/drivers/media/video/cx18/cx18-driver.c
36931 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36932
36933 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36934
36935 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36936 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36937
36938 /* Parameter declarations */
36939 static int cardtype[CX18_MAX_CARDS];
36940 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36941 struct i2c_client c;
36942 u8 eedata[256];
36943
36944 + pax_track_stack();
36945 +
36946 memset(&c, 0, sizeof(c));
36947 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36948 c.adapter = &cx->i2c_adap[0];
36949 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36950 struct cx18 *cx;
36951
36952 /* FIXME - module parameter arrays constrain max instances */
36953 - i = atomic_inc_return(&cx18_instance) - 1;
36954 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36955 if (i >= CX18_MAX_CARDS) {
36956 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36957 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36958 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36959 index 463ec34..2f4625a 100644
36960 --- a/drivers/media/video/ivtv/ivtv-driver.c
36961 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36962 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36963 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36964
36965 /* ivtv instance counter */
36966 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36967 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36968
36969 /* Parameter declarations */
36970 static int cardtype[IVTV_MAX_CARDS];
36971 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36972 index 5fc4ac0..652a54a 100644
36973 --- a/drivers/media/video/omap24xxcam.c
36974 +++ b/drivers/media/video/omap24xxcam.c
36975 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36976 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36977
36978 do_gettimeofday(&vb->ts);
36979 - vb->field_count = atomic_add_return(2, &fh->field_count);
36980 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36981 if (csr & csr_error) {
36982 vb->state = VIDEOBUF_ERROR;
36983 if (!atomic_read(&fh->cam->in_reset)) {
36984 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36985 index 2ce67f5..cf26a5b 100644
36986 --- a/drivers/media/video/omap24xxcam.h
36987 +++ b/drivers/media/video/omap24xxcam.h
36988 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36989 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36990 struct videobuf_queue vbq;
36991 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36992 - atomic_t field_count; /* field counter for videobuf_buffer */
36993 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36994 /* accessing cam here doesn't need serialisation: it's constant */
36995 struct omap24xxcam_device *cam;
36996 };
36997 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36998 index 299afa4..eb47459 100644
36999 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37000 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37001 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
37002 u8 *eeprom;
37003 struct tveeprom tvdata;
37004
37005 + pax_track_stack();
37006 +
37007 memset(&tvdata,0,sizeof(tvdata));
37008
37009 eeprom = pvr2_eeprom_fetch(hdw);
37010 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37011 index 5b152ff..3320638 100644
37012 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37013 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37014 @@ -195,7 +195,7 @@ struct pvr2_hdw {
37015
37016 /* I2C stuff */
37017 struct i2c_adapter i2c_adap;
37018 - struct i2c_algorithm i2c_algo;
37019 + i2c_algorithm_no_const i2c_algo;
37020 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
37021 int i2c_cx25840_hack_state;
37022 int i2c_linked;
37023 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
37024 index 1eabff6..8e2313a 100644
37025 --- a/drivers/media/video/saa7134/saa6752hs.c
37026 +++ b/drivers/media/video/saa7134/saa6752hs.c
37027 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
37028 unsigned char localPAT[256];
37029 unsigned char localPMT[256];
37030
37031 + pax_track_stack();
37032 +
37033 /* Set video format - must be done first as it resets other settings */
37034 set_reg8(client, 0x41, h->video_format);
37035
37036 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
37037 index 9c1d3ac..b1b49e9 100644
37038 --- a/drivers/media/video/saa7164/saa7164-cmd.c
37039 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
37040 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
37041 wait_queue_head_t *q = 0;
37042 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37043
37044 + pax_track_stack();
37045 +
37046 /* While any outstand message on the bus exists... */
37047 do {
37048
37049 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
37050 u8 tmp[512];
37051 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37052
37053 + pax_track_stack();
37054 +
37055 while (loop) {
37056
37057 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
37058 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
37059 index b085496..cde0270 100644
37060 --- a/drivers/media/video/usbvideo/ibmcam.c
37061 +++ b/drivers/media/video/usbvideo/ibmcam.c
37062 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37063 static int __init ibmcam_init(void)
37064 {
37065 struct usbvideo_cb cbTbl;
37066 - memset(&cbTbl, 0, sizeof(cbTbl));
37067 - cbTbl.probe = ibmcam_probe;
37068 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
37069 - cbTbl.videoStart = ibmcam_video_start;
37070 - cbTbl.videoStop = ibmcam_video_stop;
37071 - cbTbl.processData = ibmcam_ProcessIsocData;
37072 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37073 - cbTbl.adjustPicture = ibmcam_adjust_picture;
37074 - cbTbl.getFPS = ibmcam_calculate_fps;
37075 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
37076 + *(void **)&cbTbl.probe = ibmcam_probe;
37077 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37078 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
37079 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37080 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37081 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37082 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37083 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37084 return usbvideo_register(
37085 &cams,
37086 MAX_IBMCAM,
37087 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37088 index 31d57f2..600b735 100644
37089 --- a/drivers/media/video/usbvideo/konicawc.c
37090 +++ b/drivers/media/video/usbvideo/konicawc.c
37091 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37092 int error;
37093
37094 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37095 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37096 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37097
37098 cam->input = input_dev = input_allocate_device();
37099 if (!input_dev) {
37100 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37101 struct usbvideo_cb cbTbl;
37102 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37103 DRIVER_DESC "\n");
37104 - memset(&cbTbl, 0, sizeof(cbTbl));
37105 - cbTbl.probe = konicawc_probe;
37106 - cbTbl.setupOnOpen = konicawc_setup_on_open;
37107 - cbTbl.processData = konicawc_process_isoc;
37108 - cbTbl.getFPS = konicawc_calculate_fps;
37109 - cbTbl.setVideoMode = konicawc_set_video_mode;
37110 - cbTbl.startDataPump = konicawc_start_data;
37111 - cbTbl.stopDataPump = konicawc_stop_data;
37112 - cbTbl.adjustPicture = konicawc_adjust_picture;
37113 - cbTbl.userFree = konicawc_free_uvd;
37114 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
37115 + *(void **)&cbTbl.probe = konicawc_probe;
37116 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37117 + *(void **)&cbTbl.processData = konicawc_process_isoc;
37118 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37119 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37120 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
37121 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37122 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37123 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
37124 return usbvideo_register(
37125 &cams,
37126 MAX_CAMERAS,
37127 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37128 index 803d3e4..c4d1b96 100644
37129 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
37130 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37131 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37132 int error;
37133
37134 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37135 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37136 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37137
37138 cam->input = input_dev = input_allocate_device();
37139 if (!input_dev) {
37140 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37141 index fbd1b63..292f9f0 100644
37142 --- a/drivers/media/video/usbvideo/ultracam.c
37143 +++ b/drivers/media/video/usbvideo/ultracam.c
37144 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37145 {
37146 struct usbvideo_cb cbTbl;
37147 memset(&cbTbl, 0, sizeof(cbTbl));
37148 - cbTbl.probe = ultracam_probe;
37149 - cbTbl.setupOnOpen = ultracam_setup_on_open;
37150 - cbTbl.videoStart = ultracam_video_start;
37151 - cbTbl.videoStop = ultracam_video_stop;
37152 - cbTbl.processData = ultracam_ProcessIsocData;
37153 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37154 - cbTbl.adjustPicture = ultracam_adjust_picture;
37155 - cbTbl.getFPS = ultracam_calculate_fps;
37156 + *(void **)&cbTbl.probe = ultracam_probe;
37157 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37158 + *(void **)&cbTbl.videoStart = ultracam_video_start;
37159 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
37160 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37161 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37162 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37163 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37164 return usbvideo_register(
37165 &cams,
37166 MAX_CAMERAS,
37167 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37168 index dea8b32..34f6878 100644
37169 --- a/drivers/media/video/usbvideo/usbvideo.c
37170 +++ b/drivers/media/video/usbvideo/usbvideo.c
37171 @@ -697,15 +697,15 @@ int usbvideo_register(
37172 __func__, cams, base_size, num_cams);
37173
37174 /* Copy callbacks, apply defaults for those that are not set */
37175 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37176 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37177 if (cams->cb.getFrame == NULL)
37178 - cams->cb.getFrame = usbvideo_GetFrame;
37179 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37180 if (cams->cb.disconnect == NULL)
37181 - cams->cb.disconnect = usbvideo_Disconnect;
37182 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37183 if (cams->cb.startDataPump == NULL)
37184 - cams->cb.startDataPump = usbvideo_StartDataPump;
37185 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37186 if (cams->cb.stopDataPump == NULL)
37187 - cams->cb.stopDataPump = usbvideo_StopDataPump;
37188 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37189
37190 cams->num_cameras = num_cams;
37191 cams->cam = (struct uvd *) &cams[1];
37192 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37193 index c66985b..7fa143a 100644
37194 --- a/drivers/media/video/usbvideo/usbvideo.h
37195 +++ b/drivers/media/video/usbvideo/usbvideo.h
37196 @@ -268,7 +268,7 @@ struct usbvideo_cb {
37197 int (*startDataPump)(struct uvd *uvd);
37198 void (*stopDataPump)(struct uvd *uvd);
37199 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37200 -};
37201 +} __no_const;
37202
37203 struct usbvideo {
37204 int num_cameras; /* As allocated */
37205 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37206 index e0f91e4..37554ea 100644
37207 --- a/drivers/media/video/usbvision/usbvision-core.c
37208 +++ b/drivers/media/video/usbvision/usbvision-core.c
37209 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37210 unsigned char rv, gv, bv;
37211 static unsigned char *Y, *U, *V;
37212
37213 + pax_track_stack();
37214 +
37215 frame = usbvision->curFrame;
37216 imageSize = frame->frmwidth * frame->frmheight;
37217 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37218 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37219 index 0d06e7c..3d17d24 100644
37220 --- a/drivers/media/video/v4l2-device.c
37221 +++ b/drivers/media/video/v4l2-device.c
37222 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37223 EXPORT_SYMBOL_GPL(v4l2_device_register);
37224
37225 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37226 - atomic_t *instance)
37227 + atomic_unchecked_t *instance)
37228 {
37229 - int num = atomic_inc_return(instance) - 1;
37230 + int num = atomic_inc_return_unchecked(instance) - 1;
37231 int len = strlen(basename);
37232
37233 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37234 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37235 index 032ebae..6a3532c 100644
37236 --- a/drivers/media/video/videobuf-dma-sg.c
37237 +++ b/drivers/media/video/videobuf-dma-sg.c
37238 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37239 {
37240 struct videobuf_queue q;
37241
37242 + pax_track_stack();
37243 +
37244 /* Required to make generic handler to call __videobuf_alloc */
37245 q.int_ops = &sg_ops;
37246
37247 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37248 index b6992b7..9fa7547 100644
37249 --- a/drivers/message/fusion/mptbase.c
37250 +++ b/drivers/message/fusion/mptbase.c
37251 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37252 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37253 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37254
37255 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37256 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37257 + NULL, NULL);
37258 +#else
37259 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37260 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37261 +#endif
37262 +
37263 /*
37264 * Rounding UP to nearest 4-kB boundary here...
37265 */
37266 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37267 index 83873e3..e360e9a 100644
37268 --- a/drivers/message/fusion/mptsas.c
37269 +++ b/drivers/message/fusion/mptsas.c
37270 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37271 return 0;
37272 }
37273
37274 +static inline void
37275 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37276 +{
37277 + if (phy_info->port_details) {
37278 + phy_info->port_details->rphy = rphy;
37279 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37280 + ioc->name, rphy));
37281 + }
37282 +
37283 + if (rphy) {
37284 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37285 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37286 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37287 + ioc->name, rphy, rphy->dev.release));
37288 + }
37289 +}
37290 +
37291 /* no mutex */
37292 static void
37293 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37294 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37295 return NULL;
37296 }
37297
37298 -static inline void
37299 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37300 -{
37301 - if (phy_info->port_details) {
37302 - phy_info->port_details->rphy = rphy;
37303 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37304 - ioc->name, rphy));
37305 - }
37306 -
37307 - if (rphy) {
37308 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37309 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37310 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37311 - ioc->name, rphy, rphy->dev.release));
37312 - }
37313 -}
37314 -
37315 static inline struct sas_port *
37316 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37317 {
37318 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37319 index bd096ca..332cf76 100644
37320 --- a/drivers/message/fusion/mptscsih.c
37321 +++ b/drivers/message/fusion/mptscsih.c
37322 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37323
37324 h = shost_priv(SChost);
37325
37326 - if (h) {
37327 - if (h->info_kbuf == NULL)
37328 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37329 - return h->info_kbuf;
37330 - h->info_kbuf[0] = '\0';
37331 + if (!h)
37332 + return NULL;
37333
37334 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37335 - h->info_kbuf[size-1] = '\0';
37336 - }
37337 + if (h->info_kbuf == NULL)
37338 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37339 + return h->info_kbuf;
37340 + h->info_kbuf[0] = '\0';
37341 +
37342 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37343 + h->info_kbuf[size-1] = '\0';
37344
37345 return h->info_kbuf;
37346 }
37347 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37348 index efba702..59b2c0f 100644
37349 --- a/drivers/message/i2o/i2o_config.c
37350 +++ b/drivers/message/i2o/i2o_config.c
37351 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37352 struct i2o_message *msg;
37353 unsigned int iop;
37354
37355 + pax_track_stack();
37356 +
37357 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37358 return -EFAULT;
37359
37360 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37361 index 7045c45..c07b170 100644
37362 --- a/drivers/message/i2o/i2o_proc.c
37363 +++ b/drivers/message/i2o/i2o_proc.c
37364 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37365 "Array Controller Device"
37366 };
37367
37368 -static char *chtostr(u8 * chars, int n)
37369 -{
37370 - char tmp[256];
37371 - tmp[0] = 0;
37372 - return strncat(tmp, (char *)chars, n);
37373 -}
37374 -
37375 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37376 char *group)
37377 {
37378 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37379
37380 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37381 seq_printf(seq, "%-#8x", ddm_table.module_id);
37382 - seq_printf(seq, "%-29s",
37383 - chtostr(ddm_table.module_name_version, 28));
37384 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37385 seq_printf(seq, "%9d ", ddm_table.data_size);
37386 seq_printf(seq, "%8d", ddm_table.code_size);
37387
37388 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37389
37390 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37391 seq_printf(seq, "%-#8x", dst->module_id);
37392 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37393 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37394 + seq_printf(seq, "%-.28s", dst->module_name_version);
37395 + seq_printf(seq, "%-.8s", dst->date);
37396 seq_printf(seq, "%8d ", dst->module_size);
37397 seq_printf(seq, "%8d ", dst->mpb_size);
37398 seq_printf(seq, "0x%04x", dst->module_flags);
37399 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37400 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37401 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37402 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37403 - seq_printf(seq, "Vendor info : %s\n",
37404 - chtostr((u8 *) (work32 + 2), 16));
37405 - seq_printf(seq, "Product info : %s\n",
37406 - chtostr((u8 *) (work32 + 6), 16));
37407 - seq_printf(seq, "Description : %s\n",
37408 - chtostr((u8 *) (work32 + 10), 16));
37409 - seq_printf(seq, "Product rev. : %s\n",
37410 - chtostr((u8 *) (work32 + 14), 8));
37411 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37412 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37413 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37414 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37415
37416 seq_printf(seq, "Serial number : ");
37417 print_serial_number(seq, (u8 *) (work32 + 16),
37418 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37419 }
37420
37421 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37422 - seq_printf(seq, "Module name : %s\n",
37423 - chtostr(result.module_name, 24));
37424 - seq_printf(seq, "Module revision : %s\n",
37425 - chtostr(result.module_rev, 8));
37426 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37427 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37428
37429 seq_printf(seq, "Serial number : ");
37430 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37431 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37432 return 0;
37433 }
37434
37435 - seq_printf(seq, "Device name : %s\n",
37436 - chtostr(result.device_name, 64));
37437 - seq_printf(seq, "Service name : %s\n",
37438 - chtostr(result.service_name, 64));
37439 - seq_printf(seq, "Physical name : %s\n",
37440 - chtostr(result.physical_location, 64));
37441 - seq_printf(seq, "Instance number : %s\n",
37442 - chtostr(result.instance_number, 4));
37443 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37444 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37445 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37446 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37447
37448 return 0;
37449 }
37450 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37451 index 27cf4af..b1205b8 100644
37452 --- a/drivers/message/i2o/iop.c
37453 +++ b/drivers/message/i2o/iop.c
37454 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37455
37456 spin_lock_irqsave(&c->context_list_lock, flags);
37457
37458 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37459 - atomic_inc(&c->context_list_counter);
37460 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37461 + atomic_inc_unchecked(&c->context_list_counter);
37462
37463 - entry->context = atomic_read(&c->context_list_counter);
37464 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37465
37466 list_add(&entry->list, &c->context_list);
37467
37468 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37469
37470 #if BITS_PER_LONG == 64
37471 spin_lock_init(&c->context_list_lock);
37472 - atomic_set(&c->context_list_counter, 0);
37473 + atomic_set_unchecked(&c->context_list_counter, 0);
37474 INIT_LIST_HEAD(&c->context_list);
37475 #endif
37476
37477 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37478 index 78e3e85..66c9a0d 100644
37479 --- a/drivers/mfd/ab3100-core.c
37480 +++ b/drivers/mfd/ab3100-core.c
37481 @@ -777,7 +777,7 @@ struct ab_family_id {
37482 char *name;
37483 };
37484
37485 -static const struct ab_family_id ids[] __initdata = {
37486 +static const struct ab_family_id ids[] __initconst = {
37487 /* AB3100 */
37488 {
37489 .id = 0xc0,
37490 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37491 index 8d8c932..8104515 100644
37492 --- a/drivers/mfd/wm8350-i2c.c
37493 +++ b/drivers/mfd/wm8350-i2c.c
37494 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37495 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37496 int ret;
37497
37498 + pax_track_stack();
37499 +
37500 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37501 return -EINVAL;
37502
37503 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37504 index e4ff50b..4cc3f04 100644
37505 --- a/drivers/misc/kgdbts.c
37506 +++ b/drivers/misc/kgdbts.c
37507 @@ -118,7 +118,7 @@
37508 } while (0)
37509 #define MAX_CONFIG_LEN 40
37510
37511 -static struct kgdb_io kgdbts_io_ops;
37512 +static const struct kgdb_io kgdbts_io_ops;
37513 static char get_buf[BUFMAX];
37514 static int get_buf_cnt;
37515 static char put_buf[BUFMAX];
37516 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37517 module_put(THIS_MODULE);
37518 }
37519
37520 -static struct kgdb_io kgdbts_io_ops = {
37521 +static const struct kgdb_io kgdbts_io_ops = {
37522 .name = "kgdbts",
37523 .read_char = kgdbts_get_char,
37524 .write_char = kgdbts_put_char,
37525 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37526 index 37e7cfc..67cfb76 100644
37527 --- a/drivers/misc/sgi-gru/gruhandles.c
37528 +++ b/drivers/misc/sgi-gru/gruhandles.c
37529 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37530
37531 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37532 {
37533 - atomic_long_inc(&mcs_op_statistics[op].count);
37534 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37535 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37536 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37537 if (mcs_op_statistics[op].max < clks)
37538 mcs_op_statistics[op].max = clks;
37539 }
37540 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37541 index 3f2375c..467c6e6 100644
37542 --- a/drivers/misc/sgi-gru/gruprocfs.c
37543 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37544 @@ -32,9 +32,9 @@
37545
37546 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37547
37548 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37549 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37550 {
37551 - unsigned long val = atomic_long_read(v);
37552 + unsigned long val = atomic_long_read_unchecked(v);
37553
37554 if (val)
37555 seq_printf(s, "%16lu %s\n", val, id);
37556 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37557 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37558
37559 for (op = 0; op < mcsop_last; op++) {
37560 - count = atomic_long_read(&mcs_op_statistics[op].count);
37561 - total = atomic_long_read(&mcs_op_statistics[op].total);
37562 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37563 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37564 max = mcs_op_statistics[op].max;
37565 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37566 count ? total / count : 0, max);
37567 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37568 index 46990bc..4a251b5 100644
37569 --- a/drivers/misc/sgi-gru/grutables.h
37570 +++ b/drivers/misc/sgi-gru/grutables.h
37571 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37572 * GRU statistics.
37573 */
37574 struct gru_stats_s {
37575 - atomic_long_t vdata_alloc;
37576 - atomic_long_t vdata_free;
37577 - atomic_long_t gts_alloc;
37578 - atomic_long_t gts_free;
37579 - atomic_long_t vdata_double_alloc;
37580 - atomic_long_t gts_double_allocate;
37581 - atomic_long_t assign_context;
37582 - atomic_long_t assign_context_failed;
37583 - atomic_long_t free_context;
37584 - atomic_long_t load_user_context;
37585 - atomic_long_t load_kernel_context;
37586 - atomic_long_t lock_kernel_context;
37587 - atomic_long_t unlock_kernel_context;
37588 - atomic_long_t steal_user_context;
37589 - atomic_long_t steal_kernel_context;
37590 - atomic_long_t steal_context_failed;
37591 - atomic_long_t nopfn;
37592 - atomic_long_t break_cow;
37593 - atomic_long_t asid_new;
37594 - atomic_long_t asid_next;
37595 - atomic_long_t asid_wrap;
37596 - atomic_long_t asid_reuse;
37597 - atomic_long_t intr;
37598 - atomic_long_t intr_mm_lock_failed;
37599 - atomic_long_t call_os;
37600 - atomic_long_t call_os_offnode_reference;
37601 - atomic_long_t call_os_check_for_bug;
37602 - atomic_long_t call_os_wait_queue;
37603 - atomic_long_t user_flush_tlb;
37604 - atomic_long_t user_unload_context;
37605 - atomic_long_t user_exception;
37606 - atomic_long_t set_context_option;
37607 - atomic_long_t migrate_check;
37608 - atomic_long_t migrated_retarget;
37609 - atomic_long_t migrated_unload;
37610 - atomic_long_t migrated_unload_delay;
37611 - atomic_long_t migrated_nopfn_retarget;
37612 - atomic_long_t migrated_nopfn_unload;
37613 - atomic_long_t tlb_dropin;
37614 - atomic_long_t tlb_dropin_fail_no_asid;
37615 - atomic_long_t tlb_dropin_fail_upm;
37616 - atomic_long_t tlb_dropin_fail_invalid;
37617 - atomic_long_t tlb_dropin_fail_range_active;
37618 - atomic_long_t tlb_dropin_fail_idle;
37619 - atomic_long_t tlb_dropin_fail_fmm;
37620 - atomic_long_t tlb_dropin_fail_no_exception;
37621 - atomic_long_t tlb_dropin_fail_no_exception_war;
37622 - atomic_long_t tfh_stale_on_fault;
37623 - atomic_long_t mmu_invalidate_range;
37624 - atomic_long_t mmu_invalidate_page;
37625 - atomic_long_t mmu_clear_flush_young;
37626 - atomic_long_t flush_tlb;
37627 - atomic_long_t flush_tlb_gru;
37628 - atomic_long_t flush_tlb_gru_tgh;
37629 - atomic_long_t flush_tlb_gru_zero_asid;
37630 + atomic_long_unchecked_t vdata_alloc;
37631 + atomic_long_unchecked_t vdata_free;
37632 + atomic_long_unchecked_t gts_alloc;
37633 + atomic_long_unchecked_t gts_free;
37634 + atomic_long_unchecked_t vdata_double_alloc;
37635 + atomic_long_unchecked_t gts_double_allocate;
37636 + atomic_long_unchecked_t assign_context;
37637 + atomic_long_unchecked_t assign_context_failed;
37638 + atomic_long_unchecked_t free_context;
37639 + atomic_long_unchecked_t load_user_context;
37640 + atomic_long_unchecked_t load_kernel_context;
37641 + atomic_long_unchecked_t lock_kernel_context;
37642 + atomic_long_unchecked_t unlock_kernel_context;
37643 + atomic_long_unchecked_t steal_user_context;
37644 + atomic_long_unchecked_t steal_kernel_context;
37645 + atomic_long_unchecked_t steal_context_failed;
37646 + atomic_long_unchecked_t nopfn;
37647 + atomic_long_unchecked_t break_cow;
37648 + atomic_long_unchecked_t asid_new;
37649 + atomic_long_unchecked_t asid_next;
37650 + atomic_long_unchecked_t asid_wrap;
37651 + atomic_long_unchecked_t asid_reuse;
37652 + atomic_long_unchecked_t intr;
37653 + atomic_long_unchecked_t intr_mm_lock_failed;
37654 + atomic_long_unchecked_t call_os;
37655 + atomic_long_unchecked_t call_os_offnode_reference;
37656 + atomic_long_unchecked_t call_os_check_for_bug;
37657 + atomic_long_unchecked_t call_os_wait_queue;
37658 + atomic_long_unchecked_t user_flush_tlb;
37659 + atomic_long_unchecked_t user_unload_context;
37660 + atomic_long_unchecked_t user_exception;
37661 + atomic_long_unchecked_t set_context_option;
37662 + atomic_long_unchecked_t migrate_check;
37663 + atomic_long_unchecked_t migrated_retarget;
37664 + atomic_long_unchecked_t migrated_unload;
37665 + atomic_long_unchecked_t migrated_unload_delay;
37666 + atomic_long_unchecked_t migrated_nopfn_retarget;
37667 + atomic_long_unchecked_t migrated_nopfn_unload;
37668 + atomic_long_unchecked_t tlb_dropin;
37669 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37670 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37671 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37672 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37673 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37674 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37675 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37676 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37677 + atomic_long_unchecked_t tfh_stale_on_fault;
37678 + atomic_long_unchecked_t mmu_invalidate_range;
37679 + atomic_long_unchecked_t mmu_invalidate_page;
37680 + atomic_long_unchecked_t mmu_clear_flush_young;
37681 + atomic_long_unchecked_t flush_tlb;
37682 + atomic_long_unchecked_t flush_tlb_gru;
37683 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37684 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37685
37686 - atomic_long_t copy_gpa;
37687 + atomic_long_unchecked_t copy_gpa;
37688
37689 - atomic_long_t mesq_receive;
37690 - atomic_long_t mesq_receive_none;
37691 - atomic_long_t mesq_send;
37692 - atomic_long_t mesq_send_failed;
37693 - atomic_long_t mesq_noop;
37694 - atomic_long_t mesq_send_unexpected_error;
37695 - atomic_long_t mesq_send_lb_overflow;
37696 - atomic_long_t mesq_send_qlimit_reached;
37697 - atomic_long_t mesq_send_amo_nacked;
37698 - atomic_long_t mesq_send_put_nacked;
37699 - atomic_long_t mesq_qf_not_full;
37700 - atomic_long_t mesq_qf_locked;
37701 - atomic_long_t mesq_qf_noop_not_full;
37702 - atomic_long_t mesq_qf_switch_head_failed;
37703 - atomic_long_t mesq_qf_unexpected_error;
37704 - atomic_long_t mesq_noop_unexpected_error;
37705 - atomic_long_t mesq_noop_lb_overflow;
37706 - atomic_long_t mesq_noop_qlimit_reached;
37707 - atomic_long_t mesq_noop_amo_nacked;
37708 - atomic_long_t mesq_noop_put_nacked;
37709 + atomic_long_unchecked_t mesq_receive;
37710 + atomic_long_unchecked_t mesq_receive_none;
37711 + atomic_long_unchecked_t mesq_send;
37712 + atomic_long_unchecked_t mesq_send_failed;
37713 + atomic_long_unchecked_t mesq_noop;
37714 + atomic_long_unchecked_t mesq_send_unexpected_error;
37715 + atomic_long_unchecked_t mesq_send_lb_overflow;
37716 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37717 + atomic_long_unchecked_t mesq_send_amo_nacked;
37718 + atomic_long_unchecked_t mesq_send_put_nacked;
37719 + atomic_long_unchecked_t mesq_qf_not_full;
37720 + atomic_long_unchecked_t mesq_qf_locked;
37721 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37722 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37723 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37724 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37725 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37726 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37727 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37728 + atomic_long_unchecked_t mesq_noop_put_nacked;
37729
37730 };
37731
37732 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37733 cchop_deallocate, tghop_invalidate, mcsop_last};
37734
37735 struct mcs_op_statistic {
37736 - atomic_long_t count;
37737 - atomic_long_t total;
37738 + atomic_long_unchecked_t count;
37739 + atomic_long_unchecked_t total;
37740 unsigned long max;
37741 };
37742
37743 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37744
37745 #define STAT(id) do { \
37746 if (gru_options & OPT_STATS) \
37747 - atomic_long_inc(&gru_stats.id); \
37748 + atomic_long_inc_unchecked(&gru_stats.id); \
37749 } while (0)
37750
37751 #ifdef CONFIG_SGI_GRU_DEBUG
37752 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37753 index 2275126..12a9dbfb 100644
37754 --- a/drivers/misc/sgi-xp/xp.h
37755 +++ b/drivers/misc/sgi-xp/xp.h
37756 @@ -289,7 +289,7 @@ struct xpc_interface {
37757 xpc_notify_func, void *);
37758 void (*received) (short, int, void *);
37759 enum xp_retval (*partid_to_nasids) (short, void *);
37760 -};
37761 +} __no_const;
37762
37763 extern struct xpc_interface xpc_interface;
37764
37765 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37766 index b94d5f7..7f494c5 100644
37767 --- a/drivers/misc/sgi-xp/xpc.h
37768 +++ b/drivers/misc/sgi-xp/xpc.h
37769 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37770 void (*received_payload) (struct xpc_channel *, void *);
37771 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37772 };
37773 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37774
37775 /* struct xpc_partition act_state values (for XPC HB) */
37776
37777 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37778 /* found in xpc_main.c */
37779 extern struct device *xpc_part;
37780 extern struct device *xpc_chan;
37781 -extern struct xpc_arch_operations xpc_arch_ops;
37782 +extern xpc_arch_operations_no_const xpc_arch_ops;
37783 extern int xpc_disengage_timelimit;
37784 extern int xpc_disengage_timedout;
37785 extern int xpc_activate_IRQ_rcvd;
37786 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37787 index fd3688a..7e211a4 100644
37788 --- a/drivers/misc/sgi-xp/xpc_main.c
37789 +++ b/drivers/misc/sgi-xp/xpc_main.c
37790 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37791 .notifier_call = xpc_system_die,
37792 };
37793
37794 -struct xpc_arch_operations xpc_arch_ops;
37795 +xpc_arch_operations_no_const xpc_arch_ops;
37796
37797 /*
37798 * Timer function to enforce the timelimit on the partition disengage.
37799 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37800 index 8b70e03..700bda6 100644
37801 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37802 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37803 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37804 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37805 }
37806
37807 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37808 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37809 .setup_partitions = xpc_setup_partitions_sn2,
37810 .teardown_partitions = xpc_teardown_partitions_sn2,
37811 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37812 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37813 int ret;
37814 size_t buf_size;
37815
37816 - xpc_arch_ops = xpc_arch_ops_sn2;
37817 + pax_open_kernel();
37818 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37819 + pax_close_kernel();
37820
37821 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37822 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37823 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37824 index 8e08d71..7cb8c9b 100644
37825 --- a/drivers/misc/sgi-xp/xpc_uv.c
37826 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37827 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37828 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37829 }
37830
37831 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37832 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37833 .setup_partitions = xpc_setup_partitions_uv,
37834 .teardown_partitions = xpc_teardown_partitions_uv,
37835 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37836 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37837 int
37838 xpc_init_uv(void)
37839 {
37840 - xpc_arch_ops = xpc_arch_ops_uv;
37841 + pax_open_kernel();
37842 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37843 + pax_close_kernel();
37844
37845 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37846 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37847 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37848 index 6fd20b42..650efe3 100644
37849 --- a/drivers/mmc/host/sdhci-pci.c
37850 +++ b/drivers/mmc/host/sdhci-pci.c
37851 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37852 .probe = via_probe,
37853 };
37854
37855 -static const struct pci_device_id pci_ids[] __devinitdata = {
37856 +static const struct pci_device_id pci_ids[] __devinitconst = {
37857 {
37858 .vendor = PCI_VENDOR_ID_RICOH,
37859 .device = PCI_DEVICE_ID_RICOH_R5C822,
37860 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37861 index e7563a9..5f90ce5 100644
37862 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37863 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37864 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37865 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37866 unsigned long timeo = jiffies + HZ;
37867
37868 + pax_track_stack();
37869 +
37870 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37871 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37872 goto sleep;
37873 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37874 unsigned long initial_adr;
37875 int initial_len = len;
37876
37877 + pax_track_stack();
37878 +
37879 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37880 adr += chip->start;
37881 initial_adr = adr;
37882 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37883 int retries = 3;
37884 int ret;
37885
37886 + pax_track_stack();
37887 +
37888 adr += chip->start;
37889
37890 retry:
37891 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37892 index 0667a67..3ab97ed 100644
37893 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37894 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37895 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37896 unsigned long cmd_addr;
37897 struct cfi_private *cfi = map->fldrv_priv;
37898
37899 + pax_track_stack();
37900 +
37901 adr += chip->start;
37902
37903 /* Ensure cmd read/writes are aligned. */
37904 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37905 DECLARE_WAITQUEUE(wait, current);
37906 int wbufsize, z;
37907
37908 + pax_track_stack();
37909 +
37910 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37911 if (adr & (map_bankwidth(map)-1))
37912 return -EINVAL;
37913 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37914 DECLARE_WAITQUEUE(wait, current);
37915 int ret = 0;
37916
37917 + pax_track_stack();
37918 +
37919 adr += chip->start;
37920
37921 /* Let's determine this according to the interleave only once */
37922 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37923 unsigned long timeo = jiffies + HZ;
37924 DECLARE_WAITQUEUE(wait, current);
37925
37926 + pax_track_stack();
37927 +
37928 adr += chip->start;
37929
37930 /* Let's determine this according to the interleave only once */
37931 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37932 unsigned long timeo = jiffies + HZ;
37933 DECLARE_WAITQUEUE(wait, current);
37934
37935 + pax_track_stack();
37936 +
37937 adr += chip->start;
37938
37939 /* Let's determine this according to the interleave only once */
37940 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37941 index 5bf5f46..c5de373 100644
37942 --- a/drivers/mtd/devices/doc2000.c
37943 +++ b/drivers/mtd/devices/doc2000.c
37944 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37945
37946 /* The ECC will not be calculated correctly if less than 512 is written */
37947 /* DBB-
37948 - if (len != 0x200 && eccbuf)
37949 + if (len != 0x200)
37950 printk(KERN_WARNING
37951 "ECC needs a full sector write (adr: %lx size %lx)\n",
37952 (long) to, (long) len);
37953 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37954 index 0990f78..bb4e8a4 100644
37955 --- a/drivers/mtd/devices/doc2001.c
37956 +++ b/drivers/mtd/devices/doc2001.c
37957 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37958 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37959
37960 /* Don't allow read past end of device */
37961 - if (from >= this->totlen)
37962 + if (from >= this->totlen || !len)
37963 return -EINVAL;
37964
37965 /* Don't allow a single read to cross a 512-byte block boundary */
37966 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37967 index e56d6b4..f07e6cf 100644
37968 --- a/drivers/mtd/ftl.c
37969 +++ b/drivers/mtd/ftl.c
37970 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37971 loff_t offset;
37972 uint16_t srcunitswap = cpu_to_le16(srcunit);
37973
37974 + pax_track_stack();
37975 +
37976 eun = &part->EUNInfo[srcunit];
37977 xfer = &part->XferInfo[xferunit];
37978 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37979 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37980 index 8aca552..146446e 100755
37981 --- a/drivers/mtd/inftlcore.c
37982 +++ b/drivers/mtd/inftlcore.c
37983 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37984 struct inftl_oob oob;
37985 size_t retlen;
37986
37987 + pax_track_stack();
37988 +
37989 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37990 "pending=%d)\n", inftl, thisVUC, pendingblock);
37991
37992 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37993 index 32e82ae..ed50953 100644
37994 --- a/drivers/mtd/inftlmount.c
37995 +++ b/drivers/mtd/inftlmount.c
37996 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37997 struct INFTLPartition *ip;
37998 size_t retlen;
37999
38000 + pax_track_stack();
38001 +
38002 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
38003
38004 /*
38005 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
38006 index 79bf40f..fe5f8fd 100644
38007 --- a/drivers/mtd/lpddr/qinfo_probe.c
38008 +++ b/drivers/mtd/lpddr/qinfo_probe.c
38009 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
38010 {
38011 map_word pfow_val[4];
38012
38013 + pax_track_stack();
38014 +
38015 /* Check identification string */
38016 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
38017 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
38018 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
38019 index 726a1b8..f46b460 100644
38020 --- a/drivers/mtd/mtdchar.c
38021 +++ b/drivers/mtd/mtdchar.c
38022 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
38023 u_long size;
38024 struct mtd_info_user info;
38025
38026 + pax_track_stack();
38027 +
38028 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
38029
38030 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
38031 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
38032 index 1002e18..26d82d5 100644
38033 --- a/drivers/mtd/nftlcore.c
38034 +++ b/drivers/mtd/nftlcore.c
38035 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
38036 int inplace = 1;
38037 size_t retlen;
38038
38039 + pax_track_stack();
38040 +
38041 memset(BlockMap, 0xff, sizeof(BlockMap));
38042 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
38043
38044 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38045 index 8b22b18..6fada85 100644
38046 --- a/drivers/mtd/nftlmount.c
38047 +++ b/drivers/mtd/nftlmount.c
38048 @@ -23,6 +23,7 @@
38049 #include <asm/errno.h>
38050 #include <linux/delay.h>
38051 #include <linux/slab.h>
38052 +#include <linux/sched.h>
38053 #include <linux/mtd/mtd.h>
38054 #include <linux/mtd/nand.h>
38055 #include <linux/mtd/nftl.h>
38056 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
38057 struct mtd_info *mtd = nftl->mbd.mtd;
38058 unsigned int i;
38059
38060 + pax_track_stack();
38061 +
38062 /* Assume logical EraseSize == physical erasesize for starting the scan.
38063 We'll sort it out later if we find a MediaHeader which says otherwise */
38064 /* Actually, we won't. The new DiskOnChip driver has already scanned
38065 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38066 index 14cec04..d775b87 100644
38067 --- a/drivers/mtd/ubi/build.c
38068 +++ b/drivers/mtd/ubi/build.c
38069 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38070 static int __init bytes_str_to_int(const char *str)
38071 {
38072 char *endp;
38073 - unsigned long result;
38074 + unsigned long result, scale = 1;
38075
38076 result = simple_strtoul(str, &endp, 0);
38077 if (str == endp || result >= INT_MAX) {
38078 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38079
38080 switch (*endp) {
38081 case 'G':
38082 - result *= 1024;
38083 + scale *= 1024;
38084 case 'M':
38085 - result *= 1024;
38086 + scale *= 1024;
38087 case 'K':
38088 - result *= 1024;
38089 + scale *= 1024;
38090 if (endp[1] == 'i' && endp[2] == 'B')
38091 endp += 2;
38092 case '\0':
38093 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38094 return -EINVAL;
38095 }
38096
38097 - return result;
38098 + if ((intoverflow_t)result*scale >= INT_MAX) {
38099 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38100 + str);
38101 + return -EINVAL;
38102 + }
38103 +
38104 + return result*scale;
38105 }
38106
38107 /**
38108 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38109 index ab68886..ca405e8 100644
38110 --- a/drivers/net/atlx/atl2.c
38111 +++ b/drivers/net/atlx/atl2.c
38112 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38113 */
38114
38115 #define ATL2_PARAM(X, desc) \
38116 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38117 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38118 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38119 MODULE_PARM_DESC(X, desc);
38120 #else
38121 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38122 index 4874b2b..67f8526 100644
38123 --- a/drivers/net/bnx2.c
38124 +++ b/drivers/net/bnx2.c
38125 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38126 int rc = 0;
38127 u32 magic, csum;
38128
38129 + pax_track_stack();
38130 +
38131 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38132 goto test_nvram_done;
38133
38134 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38135 index fd3eb07..8a6978d 100644
38136 --- a/drivers/net/cxgb3/l2t.h
38137 +++ b/drivers/net/cxgb3/l2t.h
38138 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38139 */
38140 struct l2t_skb_cb {
38141 arp_failure_handler_func arp_failure_handler;
38142 -};
38143 +} __no_const;
38144
38145 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38146
38147 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38148 index 032cfe0..411af379 100644
38149 --- a/drivers/net/cxgb3/t3_hw.c
38150 +++ b/drivers/net/cxgb3/t3_hw.c
38151 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38152 int i, addr, ret;
38153 struct t3_vpd vpd;
38154
38155 + pax_track_stack();
38156 +
38157 /*
38158 * Card information is normally at VPD_BASE but some early cards had
38159 * it at 0.
38160 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38161 index d1e0563..b9e129c 100644
38162 --- a/drivers/net/e1000e/82571.c
38163 +++ b/drivers/net/e1000e/82571.c
38164 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38165 {
38166 struct e1000_hw *hw = &adapter->hw;
38167 struct e1000_mac_info *mac = &hw->mac;
38168 - struct e1000_mac_operations *func = &mac->ops;
38169 + e1000_mac_operations_no_const *func = &mac->ops;
38170 u32 swsm = 0;
38171 u32 swsm2 = 0;
38172 bool force_clear_smbi = false;
38173 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38174 temp = er32(ICRXDMTC);
38175 }
38176
38177 -static struct e1000_mac_operations e82571_mac_ops = {
38178 +static const struct e1000_mac_operations e82571_mac_ops = {
38179 /* .check_mng_mode: mac type dependent */
38180 /* .check_for_link: media type dependent */
38181 .id_led_init = e1000e_id_led_init,
38182 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38183 .setup_led = e1000e_setup_led_generic,
38184 };
38185
38186 -static struct e1000_phy_operations e82_phy_ops_igp = {
38187 +static const struct e1000_phy_operations e82_phy_ops_igp = {
38188 .acquire_phy = e1000_get_hw_semaphore_82571,
38189 .check_reset_block = e1000e_check_reset_block_generic,
38190 .commit_phy = NULL,
38191 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38192 .cfg_on_link_up = NULL,
38193 };
38194
38195 -static struct e1000_phy_operations e82_phy_ops_m88 = {
38196 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
38197 .acquire_phy = e1000_get_hw_semaphore_82571,
38198 .check_reset_block = e1000e_check_reset_block_generic,
38199 .commit_phy = e1000e_phy_sw_reset,
38200 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38201 .cfg_on_link_up = NULL,
38202 };
38203
38204 -static struct e1000_phy_operations e82_phy_ops_bm = {
38205 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38206 .acquire_phy = e1000_get_hw_semaphore_82571,
38207 .check_reset_block = e1000e_check_reset_block_generic,
38208 .commit_phy = e1000e_phy_sw_reset,
38209 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38210 .cfg_on_link_up = NULL,
38211 };
38212
38213 -static struct e1000_nvm_operations e82571_nvm_ops = {
38214 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38215 .acquire_nvm = e1000_acquire_nvm_82571,
38216 .read_nvm = e1000e_read_nvm_eerd,
38217 .release_nvm = e1000_release_nvm_82571,
38218 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38219 index 47db9bd..fa58ccd 100644
38220 --- a/drivers/net/e1000e/e1000.h
38221 +++ b/drivers/net/e1000e/e1000.h
38222 @@ -375,9 +375,9 @@ struct e1000_info {
38223 u32 pba;
38224 u32 max_hw_frame_size;
38225 s32 (*get_variants)(struct e1000_adapter *);
38226 - struct e1000_mac_operations *mac_ops;
38227 - struct e1000_phy_operations *phy_ops;
38228 - struct e1000_nvm_operations *nvm_ops;
38229 + const struct e1000_mac_operations *mac_ops;
38230 + const struct e1000_phy_operations *phy_ops;
38231 + const struct e1000_nvm_operations *nvm_ops;
38232 };
38233
38234 /* hardware capability, feature, and workaround flags */
38235 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38236 index ae5d736..e9a93a1 100644
38237 --- a/drivers/net/e1000e/es2lan.c
38238 +++ b/drivers/net/e1000e/es2lan.c
38239 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38240 {
38241 struct e1000_hw *hw = &adapter->hw;
38242 struct e1000_mac_info *mac = &hw->mac;
38243 - struct e1000_mac_operations *func = &mac->ops;
38244 + e1000_mac_operations_no_const *func = &mac->ops;
38245
38246 /* Set media type */
38247 switch (adapter->pdev->device) {
38248 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38249 temp = er32(ICRXDMTC);
38250 }
38251
38252 -static struct e1000_mac_operations es2_mac_ops = {
38253 +static const struct e1000_mac_operations es2_mac_ops = {
38254 .id_led_init = e1000e_id_led_init,
38255 .check_mng_mode = e1000e_check_mng_mode_generic,
38256 /* check_for_link dependent on media type */
38257 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38258 .setup_led = e1000e_setup_led_generic,
38259 };
38260
38261 -static struct e1000_phy_operations es2_phy_ops = {
38262 +static const struct e1000_phy_operations es2_phy_ops = {
38263 .acquire_phy = e1000_acquire_phy_80003es2lan,
38264 .check_reset_block = e1000e_check_reset_block_generic,
38265 .commit_phy = e1000e_phy_sw_reset,
38266 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38267 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38268 };
38269
38270 -static struct e1000_nvm_operations es2_nvm_ops = {
38271 +static const struct e1000_nvm_operations es2_nvm_ops = {
38272 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38273 .read_nvm = e1000e_read_nvm_eerd,
38274 .release_nvm = e1000_release_nvm_80003es2lan,
38275 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38276 index 11f3b7c..6381887 100644
38277 --- a/drivers/net/e1000e/hw.h
38278 +++ b/drivers/net/e1000e/hw.h
38279 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38280 s32 (*setup_physical_interface)(struct e1000_hw *);
38281 s32 (*setup_led)(struct e1000_hw *);
38282 };
38283 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38284
38285 /* Function pointers for the PHY. */
38286 struct e1000_phy_operations {
38287 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38288 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38289 s32 (*cfg_on_link_up)(struct e1000_hw *);
38290 };
38291 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38292
38293 /* Function pointers for the NVM. */
38294 struct e1000_nvm_operations {
38295 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38296 s32 (*validate_nvm)(struct e1000_hw *);
38297 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38298 };
38299 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38300
38301 struct e1000_mac_info {
38302 - struct e1000_mac_operations ops;
38303 + e1000_mac_operations_no_const ops;
38304
38305 u8 addr[6];
38306 u8 perm_addr[6];
38307 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38308 };
38309
38310 struct e1000_phy_info {
38311 - struct e1000_phy_operations ops;
38312 + e1000_phy_operations_no_const ops;
38313
38314 enum e1000_phy_type type;
38315
38316 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38317 };
38318
38319 struct e1000_nvm_info {
38320 - struct e1000_nvm_operations ops;
38321 + e1000_nvm_operations_no_const ops;
38322
38323 enum e1000_nvm_type type;
38324 enum e1000_nvm_override override;
38325 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38326 index de39f9a..e28d3e0 100644
38327 --- a/drivers/net/e1000e/ich8lan.c
38328 +++ b/drivers/net/e1000e/ich8lan.c
38329 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38330 }
38331 }
38332
38333 -static struct e1000_mac_operations ich8_mac_ops = {
38334 +static const struct e1000_mac_operations ich8_mac_ops = {
38335 .id_led_init = e1000e_id_led_init,
38336 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38337 .check_for_link = e1000_check_for_copper_link_ich8lan,
38338 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38339 /* id_led_init dependent on mac type */
38340 };
38341
38342 -static struct e1000_phy_operations ich8_phy_ops = {
38343 +static const struct e1000_phy_operations ich8_phy_ops = {
38344 .acquire_phy = e1000_acquire_swflag_ich8lan,
38345 .check_reset_block = e1000_check_reset_block_ich8lan,
38346 .commit_phy = NULL,
38347 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38348 .write_phy_reg = e1000e_write_phy_reg_igp,
38349 };
38350
38351 -static struct e1000_nvm_operations ich8_nvm_ops = {
38352 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38353 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38354 .read_nvm = e1000_read_nvm_ich8lan,
38355 .release_nvm = e1000_release_nvm_ich8lan,
38356 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38357 index 18d5fbb..542d96d 100644
38358 --- a/drivers/net/fealnx.c
38359 +++ b/drivers/net/fealnx.c
38360 @@ -151,7 +151,7 @@ struct chip_info {
38361 int flags;
38362 };
38363
38364 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38365 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38366 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38367 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38368 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38369 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38370 index 0e5b54b..b503f82 100644
38371 --- a/drivers/net/hamradio/6pack.c
38372 +++ b/drivers/net/hamradio/6pack.c
38373 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38374 unsigned char buf[512];
38375 int count1;
38376
38377 + pax_track_stack();
38378 +
38379 if (!count)
38380 return;
38381
38382 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38383 index 5862282..7cce8cb 100644
38384 --- a/drivers/net/ibmveth.c
38385 +++ b/drivers/net/ibmveth.c
38386 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38387 NULL,
38388 };
38389
38390 -static struct sysfs_ops veth_pool_ops = {
38391 +static const struct sysfs_ops veth_pool_ops = {
38392 .show = veth_pool_show,
38393 .store = veth_pool_store,
38394 };
38395 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38396 index d617f2d..57b5309 100644
38397 --- a/drivers/net/igb/e1000_82575.c
38398 +++ b/drivers/net/igb/e1000_82575.c
38399 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38400 wr32(E1000_VT_CTL, vt_ctl);
38401 }
38402
38403 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38404 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38405 .reset_hw = igb_reset_hw_82575,
38406 .init_hw = igb_init_hw_82575,
38407 .check_for_link = igb_check_for_link_82575,
38408 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38409 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38410 };
38411
38412 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38413 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38414 .acquire = igb_acquire_phy_82575,
38415 .get_cfg_done = igb_get_cfg_done_82575,
38416 .release = igb_release_phy_82575,
38417 };
38418
38419 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38420 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38421 .acquire = igb_acquire_nvm_82575,
38422 .read = igb_read_nvm_eerd,
38423 .release = igb_release_nvm_82575,
38424 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38425 index 72081df..d855cf5 100644
38426 --- a/drivers/net/igb/e1000_hw.h
38427 +++ b/drivers/net/igb/e1000_hw.h
38428 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38429 s32 (*read_mac_addr)(struct e1000_hw *);
38430 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38431 };
38432 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38433
38434 struct e1000_phy_operations {
38435 s32 (*acquire)(struct e1000_hw *);
38436 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38437 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38438 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38439 };
38440 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38441
38442 struct e1000_nvm_operations {
38443 s32 (*acquire)(struct e1000_hw *);
38444 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38445 void (*release)(struct e1000_hw *);
38446 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38447 };
38448 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38449
38450 struct e1000_info {
38451 s32 (*get_invariants)(struct e1000_hw *);
38452 @@ -321,7 +324,7 @@ struct e1000_info {
38453 extern const struct e1000_info e1000_82575_info;
38454
38455 struct e1000_mac_info {
38456 - struct e1000_mac_operations ops;
38457 + e1000_mac_operations_no_const ops;
38458
38459 u8 addr[6];
38460 u8 perm_addr[6];
38461 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38462 };
38463
38464 struct e1000_phy_info {
38465 - struct e1000_phy_operations ops;
38466 + e1000_phy_operations_no_const ops;
38467
38468 enum e1000_phy_type type;
38469
38470 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38471 };
38472
38473 struct e1000_nvm_info {
38474 - struct e1000_nvm_operations ops;
38475 + e1000_nvm_operations_no_const ops;
38476
38477 enum e1000_nvm_type type;
38478 enum e1000_nvm_override override;
38479 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38480 s32 (*check_for_ack)(struct e1000_hw *, u16);
38481 s32 (*check_for_rst)(struct e1000_hw *, u16);
38482 };
38483 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38484
38485 struct e1000_mbx_stats {
38486 u32 msgs_tx;
38487 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38488 };
38489
38490 struct e1000_mbx_info {
38491 - struct e1000_mbx_operations ops;
38492 + e1000_mbx_operations_no_const ops;
38493 struct e1000_mbx_stats stats;
38494 u32 timeout;
38495 u32 usec_delay;
38496 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38497 index 1e8ce37..549c453 100644
38498 --- a/drivers/net/igbvf/vf.h
38499 +++ b/drivers/net/igbvf/vf.h
38500 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38501 s32 (*read_mac_addr)(struct e1000_hw *);
38502 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38503 };
38504 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38505
38506 struct e1000_mac_info {
38507 - struct e1000_mac_operations ops;
38508 + e1000_mac_operations_no_const ops;
38509 u8 addr[6];
38510 u8 perm_addr[6];
38511
38512 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38513 s32 (*check_for_ack)(struct e1000_hw *);
38514 s32 (*check_for_rst)(struct e1000_hw *);
38515 };
38516 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38517
38518 struct e1000_mbx_stats {
38519 u32 msgs_tx;
38520 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38521 };
38522
38523 struct e1000_mbx_info {
38524 - struct e1000_mbx_operations ops;
38525 + e1000_mbx_operations_no_const ops;
38526 struct e1000_mbx_stats stats;
38527 u32 timeout;
38528 u32 usec_delay;
38529 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38530 index aa7286b..a61394f 100644
38531 --- a/drivers/net/iseries_veth.c
38532 +++ b/drivers/net/iseries_veth.c
38533 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38534 NULL
38535 };
38536
38537 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38538 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38539 .show = veth_cnx_attribute_show
38540 };
38541
38542 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38543 NULL
38544 };
38545
38546 -static struct sysfs_ops veth_port_sysfs_ops = {
38547 +static const struct sysfs_ops veth_port_sysfs_ops = {
38548 .show = veth_port_attribute_show
38549 };
38550
38551 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38552 index 8aa44dc..fa1e797 100644
38553 --- a/drivers/net/ixgb/ixgb_main.c
38554 +++ b/drivers/net/ixgb/ixgb_main.c
38555 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38556 u32 rctl;
38557 int i;
38558
38559 + pax_track_stack();
38560 +
38561 /* Check for Promiscuous and All Multicast modes */
38562
38563 rctl = IXGB_READ_REG(hw, RCTL);
38564 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38565 index af35e1d..8781785 100644
38566 --- a/drivers/net/ixgb/ixgb_param.c
38567 +++ b/drivers/net/ixgb/ixgb_param.c
38568 @@ -260,6 +260,9 @@ void __devinit
38569 ixgb_check_options(struct ixgb_adapter *adapter)
38570 {
38571 int bd = adapter->bd_number;
38572 +
38573 + pax_track_stack();
38574 +
38575 if (bd >= IXGB_MAX_NIC) {
38576 printk(KERN_NOTICE
38577 "Warning: no configuration for board #%i\n", bd);
38578 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38579 index b17aa73..ed74540 100644
38580 --- a/drivers/net/ixgbe/ixgbe_type.h
38581 +++ b/drivers/net/ixgbe/ixgbe_type.h
38582 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38583 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38584 s32 (*update_checksum)(struct ixgbe_hw *);
38585 };
38586 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38587
38588 struct ixgbe_mac_operations {
38589 s32 (*init_hw)(struct ixgbe_hw *);
38590 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38591 /* Flow Control */
38592 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38593 };
38594 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38595
38596 struct ixgbe_phy_operations {
38597 s32 (*identify)(struct ixgbe_hw *);
38598 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38599 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38600 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38601 };
38602 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38603
38604 struct ixgbe_eeprom_info {
38605 - struct ixgbe_eeprom_operations ops;
38606 + ixgbe_eeprom_operations_no_const ops;
38607 enum ixgbe_eeprom_type type;
38608 u32 semaphore_delay;
38609 u16 word_size;
38610 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38611 };
38612
38613 struct ixgbe_mac_info {
38614 - struct ixgbe_mac_operations ops;
38615 + ixgbe_mac_operations_no_const ops;
38616 enum ixgbe_mac_type type;
38617 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38618 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38619 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38620 };
38621
38622 struct ixgbe_phy_info {
38623 - struct ixgbe_phy_operations ops;
38624 + ixgbe_phy_operations_no_const ops;
38625 struct mdio_if_info mdio;
38626 enum ixgbe_phy_type type;
38627 u32 id;
38628 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38629 index 291a505..2543756 100644
38630 --- a/drivers/net/mlx4/main.c
38631 +++ b/drivers/net/mlx4/main.c
38632 @@ -38,6 +38,7 @@
38633 #include <linux/errno.h>
38634 #include <linux/pci.h>
38635 #include <linux/dma-mapping.h>
38636 +#include <linux/sched.h>
38637
38638 #include <linux/mlx4/device.h>
38639 #include <linux/mlx4/doorbell.h>
38640 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38641 u64 icm_size;
38642 int err;
38643
38644 + pax_track_stack();
38645 +
38646 err = mlx4_QUERY_FW(dev);
38647 if (err) {
38648 if (err == -EACCES)
38649 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38650 index 2dce134..fa5ce75 100644
38651 --- a/drivers/net/niu.c
38652 +++ b/drivers/net/niu.c
38653 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38654 int i, num_irqs, err;
38655 u8 first_ldg;
38656
38657 + pax_track_stack();
38658 +
38659 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38660 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38661 ldg_num_map[i] = first_ldg + i;
38662 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38663 index c1b3f09..97cd8c4 100644
38664 --- a/drivers/net/pcnet32.c
38665 +++ b/drivers/net/pcnet32.c
38666 @@ -79,7 +79,7 @@ static int cards_found;
38667 /*
38668 * VLB I/O addresses
38669 */
38670 -static unsigned int pcnet32_portlist[] __initdata =
38671 +static unsigned int pcnet32_portlist[] __devinitdata =
38672 { 0x300, 0x320, 0x340, 0x360, 0 };
38673
38674 static int pcnet32_debug = 0;
38675 @@ -267,7 +267,7 @@ struct pcnet32_private {
38676 struct sk_buff **rx_skbuff;
38677 dma_addr_t *tx_dma_addr;
38678 dma_addr_t *rx_dma_addr;
38679 - struct pcnet32_access a;
38680 + struct pcnet32_access *a;
38681 spinlock_t lock; /* Guard lock */
38682 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38683 unsigned int rx_ring_size; /* current rx ring size */
38684 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38685 u16 val;
38686
38687 netif_wake_queue(dev);
38688 - val = lp->a.read_csr(ioaddr, CSR3);
38689 + val = lp->a->read_csr(ioaddr, CSR3);
38690 val &= 0x00ff;
38691 - lp->a.write_csr(ioaddr, CSR3, val);
38692 + lp->a->write_csr(ioaddr, CSR3, val);
38693 napi_enable(&lp->napi);
38694 }
38695
38696 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38697 r = mii_link_ok(&lp->mii_if);
38698 } else if (lp->chip_version >= PCNET32_79C970A) {
38699 ulong ioaddr = dev->base_addr; /* card base I/O address */
38700 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38701 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38702 } else { /* can not detect link on really old chips */
38703 r = 1;
38704 }
38705 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38706 pcnet32_netif_stop(dev);
38707
38708 spin_lock_irqsave(&lp->lock, flags);
38709 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38710 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38711
38712 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38713
38714 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38715 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38716 {
38717 struct pcnet32_private *lp = netdev_priv(dev);
38718 - struct pcnet32_access *a = &lp->a; /* access to registers */
38719 + struct pcnet32_access *a = lp->a; /* access to registers */
38720 ulong ioaddr = dev->base_addr; /* card base I/O address */
38721 struct sk_buff *skb; /* sk buff */
38722 int x, i; /* counters */
38723 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38724 pcnet32_netif_stop(dev);
38725
38726 spin_lock_irqsave(&lp->lock, flags);
38727 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38728 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38729
38730 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38731
38732 /* Reset the PCNET32 */
38733 - lp->a.reset(ioaddr);
38734 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38735 + lp->a->reset(ioaddr);
38736 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38737
38738 /* switch pcnet32 to 32bit mode */
38739 - lp->a.write_bcr(ioaddr, 20, 2);
38740 + lp->a->write_bcr(ioaddr, 20, 2);
38741
38742 /* purge & init rings but don't actually restart */
38743 pcnet32_restart(dev, 0x0000);
38744
38745 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38746 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38747
38748 /* Initialize Transmit buffers. */
38749 size = data_len + 15;
38750 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38751
38752 /* set int loopback in CSR15 */
38753 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38754 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38755 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38756
38757 teststatus = cpu_to_le16(0x8000);
38758 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38759 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38760
38761 /* Check status of descriptors */
38762 for (x = 0; x < numbuffs; x++) {
38763 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38764 }
38765 }
38766
38767 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38768 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38769 wmb();
38770 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38771 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38772 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38773 pcnet32_restart(dev, CSR0_NORMAL);
38774 } else {
38775 pcnet32_purge_rx_ring(dev);
38776 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38777 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38778 }
38779 spin_unlock_irqrestore(&lp->lock, flags);
38780
38781 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38782 static void pcnet32_led_blink_callback(struct net_device *dev)
38783 {
38784 struct pcnet32_private *lp = netdev_priv(dev);
38785 - struct pcnet32_access *a = &lp->a;
38786 + struct pcnet32_access *a = lp->a;
38787 ulong ioaddr = dev->base_addr;
38788 unsigned long flags;
38789 int i;
38790 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38791 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38792 {
38793 struct pcnet32_private *lp = netdev_priv(dev);
38794 - struct pcnet32_access *a = &lp->a;
38795 + struct pcnet32_access *a = lp->a;
38796 ulong ioaddr = dev->base_addr;
38797 unsigned long flags;
38798 int i, regs[4];
38799 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38800 {
38801 int csr5;
38802 struct pcnet32_private *lp = netdev_priv(dev);
38803 - struct pcnet32_access *a = &lp->a;
38804 + struct pcnet32_access *a = lp->a;
38805 ulong ioaddr = dev->base_addr;
38806 int ticks;
38807
38808 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38809 spin_lock_irqsave(&lp->lock, flags);
38810 if (pcnet32_tx(dev)) {
38811 /* reset the chip to clear the error condition, then restart */
38812 - lp->a.reset(ioaddr);
38813 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38814 + lp->a->reset(ioaddr);
38815 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38816 pcnet32_restart(dev, CSR0_START);
38817 netif_wake_queue(dev);
38818 }
38819 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38820 __napi_complete(napi);
38821
38822 /* clear interrupt masks */
38823 - val = lp->a.read_csr(ioaddr, CSR3);
38824 + val = lp->a->read_csr(ioaddr, CSR3);
38825 val &= 0x00ff;
38826 - lp->a.write_csr(ioaddr, CSR3, val);
38827 + lp->a->write_csr(ioaddr, CSR3, val);
38828
38829 /* Set interrupt enable. */
38830 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38831 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38832
38833 spin_unlock_irqrestore(&lp->lock, flags);
38834 }
38835 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38836 int i, csr0;
38837 u16 *buff = ptr;
38838 struct pcnet32_private *lp = netdev_priv(dev);
38839 - struct pcnet32_access *a = &lp->a;
38840 + struct pcnet32_access *a = lp->a;
38841 ulong ioaddr = dev->base_addr;
38842 unsigned long flags;
38843
38844 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38845 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38846 if (lp->phymask & (1 << j)) {
38847 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38848 - lp->a.write_bcr(ioaddr, 33,
38849 + lp->a->write_bcr(ioaddr, 33,
38850 (j << 5) | i);
38851 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38852 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38853 }
38854 }
38855 }
38856 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38857 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38858 lp->options |= PCNET32_PORT_FD;
38859
38860 - lp->a = *a;
38861 + lp->a = a;
38862
38863 /* prior to register_netdev, dev->name is not yet correct */
38864 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38865 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38866 if (lp->mii) {
38867 /* lp->phycount and lp->phymask are set to 0 by memset above */
38868
38869 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38870 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38871 /* scan for PHYs */
38872 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38873 unsigned short id1, id2;
38874 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38875 "Found PHY %04x:%04x at address %d.\n",
38876 id1, id2, i);
38877 }
38878 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38879 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38880 if (lp->phycount > 1) {
38881 lp->options |= PCNET32_PORT_MII;
38882 }
38883 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38884 }
38885
38886 /* Reset the PCNET32 */
38887 - lp->a.reset(ioaddr);
38888 + lp->a->reset(ioaddr);
38889
38890 /* switch pcnet32 to 32bit mode */
38891 - lp->a.write_bcr(ioaddr, 20, 2);
38892 + lp->a->write_bcr(ioaddr, 20, 2);
38893
38894 if (netif_msg_ifup(lp))
38895 printk(KERN_DEBUG
38896 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38897 (u32) (lp->init_dma_addr));
38898
38899 /* set/reset autoselect bit */
38900 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38901 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38902 if (lp->options & PCNET32_PORT_ASEL)
38903 val |= 2;
38904 - lp->a.write_bcr(ioaddr, 2, val);
38905 + lp->a->write_bcr(ioaddr, 2, val);
38906
38907 /* handle full duplex setting */
38908 if (lp->mii_if.full_duplex) {
38909 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38910 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38911 if (lp->options & PCNET32_PORT_FD) {
38912 val |= 1;
38913 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38914 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38915 if (lp->chip_version == 0x2627)
38916 val |= 3;
38917 }
38918 - lp->a.write_bcr(ioaddr, 9, val);
38919 + lp->a->write_bcr(ioaddr, 9, val);
38920 }
38921
38922 /* set/reset GPSI bit in test register */
38923 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38924 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38925 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38926 val |= 0x10;
38927 - lp->a.write_csr(ioaddr, 124, val);
38928 + lp->a->write_csr(ioaddr, 124, val);
38929
38930 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38931 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38932 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38933 * duplex, and/or enable auto negotiation, and clear DANAS
38934 */
38935 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38936 - lp->a.write_bcr(ioaddr, 32,
38937 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38938 + lp->a->write_bcr(ioaddr, 32,
38939 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38940 /* disable Auto Negotiation, set 10Mpbs, HD */
38941 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38942 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38943 if (lp->options & PCNET32_PORT_FD)
38944 val |= 0x10;
38945 if (lp->options & PCNET32_PORT_100)
38946 val |= 0x08;
38947 - lp->a.write_bcr(ioaddr, 32, val);
38948 + lp->a->write_bcr(ioaddr, 32, val);
38949 } else {
38950 if (lp->options & PCNET32_PORT_ASEL) {
38951 - lp->a.write_bcr(ioaddr, 32,
38952 - lp->a.read_bcr(ioaddr,
38953 + lp->a->write_bcr(ioaddr, 32,
38954 + lp->a->read_bcr(ioaddr,
38955 32) | 0x0080);
38956 /* enable auto negotiate, setup, disable fd */
38957 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38958 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38959 val |= 0x20;
38960 - lp->a.write_bcr(ioaddr, 32, val);
38961 + lp->a->write_bcr(ioaddr, 32, val);
38962 }
38963 }
38964 } else {
38965 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38966 * There is really no good other way to handle multiple PHYs
38967 * other than turning off all automatics
38968 */
38969 - val = lp->a.read_bcr(ioaddr, 2);
38970 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38971 - val = lp->a.read_bcr(ioaddr, 32);
38972 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38973 + val = lp->a->read_bcr(ioaddr, 2);
38974 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38975 + val = lp->a->read_bcr(ioaddr, 32);
38976 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38977
38978 if (!(lp->options & PCNET32_PORT_ASEL)) {
38979 /* setup ecmd */
38980 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38981 ecmd.speed =
38982 lp->
38983 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38984 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38985 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38986
38987 if (lp->options & PCNET32_PORT_FD) {
38988 ecmd.duplex = DUPLEX_FULL;
38989 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38990 ecmd.duplex = DUPLEX_HALF;
38991 bcr9 |= ~(1 << 0);
38992 }
38993 - lp->a.write_bcr(ioaddr, 9, bcr9);
38994 + lp->a->write_bcr(ioaddr, 9, bcr9);
38995 }
38996
38997 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38998 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38999
39000 #ifdef DO_DXSUFLO
39001 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
39002 - val = lp->a.read_csr(ioaddr, CSR3);
39003 + val = lp->a->read_csr(ioaddr, CSR3);
39004 val |= 0x40;
39005 - lp->a.write_csr(ioaddr, CSR3, val);
39006 + lp->a->write_csr(ioaddr, CSR3, val);
39007 }
39008 #endif
39009
39010 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
39011 napi_enable(&lp->napi);
39012
39013 /* Re-initialize the PCNET32, and start it when done. */
39014 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
39015 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
39016 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
39017 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
39018
39019 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39020 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39021 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39022 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39023
39024 netif_start_queue(dev);
39025
39026 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
39027
39028 i = 0;
39029 while (i++ < 100)
39030 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39031 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39032 break;
39033 /*
39034 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
39035 * reports that doing so triggers a bug in the '974.
39036 */
39037 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
39038 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
39039
39040 if (netif_msg_ifup(lp))
39041 printk(KERN_DEBUG
39042 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
39043 dev->name, i,
39044 (u32) (lp->init_dma_addr),
39045 - lp->a.read_csr(ioaddr, CSR0));
39046 + lp->a->read_csr(ioaddr, CSR0));
39047
39048 spin_unlock_irqrestore(&lp->lock, flags);
39049
39050 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
39051 * Switch back to 16bit mode to avoid problems with dumb
39052 * DOS packet driver after a warm reboot
39053 */
39054 - lp->a.write_bcr(ioaddr, 20, 4);
39055 + lp->a->write_bcr(ioaddr, 20, 4);
39056
39057 err_free_irq:
39058 spin_unlock_irqrestore(&lp->lock, flags);
39059 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39060
39061 /* wait for stop */
39062 for (i = 0; i < 100; i++)
39063 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
39064 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
39065 break;
39066
39067 if (i >= 100 && netif_msg_drv(lp))
39068 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39069 return;
39070
39071 /* ReInit Ring */
39072 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39073 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39074 i = 0;
39075 while (i++ < 1000)
39076 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39077 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39078 break;
39079
39080 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39081 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39082 }
39083
39084 static void pcnet32_tx_timeout(struct net_device *dev)
39085 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39086 if (pcnet32_debug & NETIF_MSG_DRV)
39087 printk(KERN_ERR
39088 "%s: transmit timed out, status %4.4x, resetting.\n",
39089 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39090 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39091 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39092 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39093 dev->stats.tx_errors++;
39094 if (netif_msg_tx_err(lp)) {
39095 int i;
39096 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39097 if (netif_msg_tx_queued(lp)) {
39098 printk(KERN_DEBUG
39099 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39100 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39101 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39102 }
39103
39104 /* Default status -- will not enable Successful-TxDone
39105 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39106 dev->stats.tx_bytes += skb->len;
39107
39108 /* Trigger an immediate send poll. */
39109 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39110 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39111
39112 dev->trans_start = jiffies;
39113
39114 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39115
39116 spin_lock(&lp->lock);
39117
39118 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39119 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39120 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39121 if (csr0 == 0xffff) {
39122 break; /* PCMCIA remove happened */
39123 }
39124 /* Acknowledge all of the current interrupt sources ASAP. */
39125 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39126 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39127
39128 if (netif_msg_intr(lp))
39129 printk(KERN_DEBUG
39130 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39131 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39132 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39133
39134 /* Log misc errors. */
39135 if (csr0 & 0x4000)
39136 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39137 if (napi_schedule_prep(&lp->napi)) {
39138 u16 val;
39139 /* set interrupt masks */
39140 - val = lp->a.read_csr(ioaddr, CSR3);
39141 + val = lp->a->read_csr(ioaddr, CSR3);
39142 val |= 0x5f00;
39143 - lp->a.write_csr(ioaddr, CSR3, val);
39144 + lp->a->write_csr(ioaddr, CSR3, val);
39145
39146 __napi_schedule(&lp->napi);
39147 break;
39148 }
39149 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39150 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39151 }
39152
39153 if (netif_msg_intr(lp))
39154 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39155 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39156 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39157
39158 spin_unlock(&lp->lock);
39159
39160 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39161
39162 spin_lock_irqsave(&lp->lock, flags);
39163
39164 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39165 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39166
39167 if (netif_msg_ifdown(lp))
39168 printk(KERN_DEBUG
39169 "%s: Shutting down ethercard, status was %2.2x.\n",
39170 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39171 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39172
39173 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39174 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39175 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39176
39177 /*
39178 * Switch back to 16bit mode to avoid problems with dumb
39179 * DOS packet driver after a warm reboot
39180 */
39181 - lp->a.write_bcr(ioaddr, 20, 4);
39182 + lp->a->write_bcr(ioaddr, 20, 4);
39183
39184 spin_unlock_irqrestore(&lp->lock, flags);
39185
39186 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39187 unsigned long flags;
39188
39189 spin_lock_irqsave(&lp->lock, flags);
39190 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39191 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39192 spin_unlock_irqrestore(&lp->lock, flags);
39193
39194 return &dev->stats;
39195 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39196 if (dev->flags & IFF_ALLMULTI) {
39197 ib->filter[0] = cpu_to_le32(~0U);
39198 ib->filter[1] = cpu_to_le32(~0U);
39199 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39200 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39201 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39202 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39203 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39204 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39205 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39206 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39207 return;
39208 }
39209 /* clear the multicast filter */
39210 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39211 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39212 }
39213 for (i = 0; i < 4; i++)
39214 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39215 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39216 le16_to_cpu(mcast_table[i]));
39217 return;
39218 }
39219 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39220
39221 spin_lock_irqsave(&lp->lock, flags);
39222 suspended = pcnet32_suspend(dev, &flags, 0);
39223 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39224 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39225 if (dev->flags & IFF_PROMISC) {
39226 /* Log any net taps. */
39227 if (netif_msg_hw(lp))
39228 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39229 lp->init_block->mode =
39230 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39231 7);
39232 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39233 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39234 } else {
39235 lp->init_block->mode =
39236 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39237 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39238 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39239 pcnet32_load_multicast(dev);
39240 }
39241
39242 if (suspended) {
39243 int csr5;
39244 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39245 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39246 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39247 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39248 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39249 } else {
39250 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39251 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39252 pcnet32_restart(dev, CSR0_NORMAL);
39253 netif_wake_queue(dev);
39254 }
39255 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39256 if (!lp->mii)
39257 return 0;
39258
39259 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39260 - val_out = lp->a.read_bcr(ioaddr, 34);
39261 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39262 + val_out = lp->a->read_bcr(ioaddr, 34);
39263
39264 return val_out;
39265 }
39266 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39267 if (!lp->mii)
39268 return;
39269
39270 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39271 - lp->a.write_bcr(ioaddr, 34, val);
39272 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39273 + lp->a->write_bcr(ioaddr, 34, val);
39274 }
39275
39276 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39277 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39278 curr_link = mii_link_ok(&lp->mii_if);
39279 } else {
39280 ulong ioaddr = dev->base_addr; /* card base I/O address */
39281 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39282 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39283 }
39284 if (!curr_link) {
39285 if (prev_link || verbose) {
39286 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39287 (ecmd.duplex ==
39288 DUPLEX_FULL) ? "full" : "half");
39289 }
39290 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39291 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39292 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39293 if (lp->mii_if.full_duplex)
39294 bcr9 |= (1 << 0);
39295 else
39296 bcr9 &= ~(1 << 0);
39297 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39298 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39299 }
39300 } else {
39301 if (netif_msg_link(lp))
39302 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39303 index 7cc9898..6eb50d3 100644
39304 --- a/drivers/net/sis190.c
39305 +++ b/drivers/net/sis190.c
39306 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39307 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39308 struct net_device *dev)
39309 {
39310 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39311 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39312 struct sis190_private *tp = netdev_priv(dev);
39313 struct pci_dev *isa_bridge;
39314 u8 reg, tmp8;
39315 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39316 index e13685a..60c948c 100644
39317 --- a/drivers/net/sundance.c
39318 +++ b/drivers/net/sundance.c
39319 @@ -225,7 +225,7 @@ enum {
39320 struct pci_id_info {
39321 const char *name;
39322 };
39323 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39324 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39325 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39326 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39327 {"D-Link DFE-580TX 4 port Server Adapter"},
39328 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39329 index 529f55a..cccaa18 100644
39330 --- a/drivers/net/tg3.h
39331 +++ b/drivers/net/tg3.h
39332 @@ -95,6 +95,7 @@
39333 #define CHIPREV_ID_5750_A0 0x4000
39334 #define CHIPREV_ID_5750_A1 0x4001
39335 #define CHIPREV_ID_5750_A3 0x4003
39336 +#define CHIPREV_ID_5750_C1 0x4201
39337 #define CHIPREV_ID_5750_C2 0x4202
39338 #define CHIPREV_ID_5752_A0_HW 0x5000
39339 #define CHIPREV_ID_5752_A0 0x6000
39340 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39341 index b9db1b5..720f9ce 100644
39342 --- a/drivers/net/tokenring/abyss.c
39343 +++ b/drivers/net/tokenring/abyss.c
39344 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39345
39346 static int __init abyss_init (void)
39347 {
39348 - abyss_netdev_ops = tms380tr_netdev_ops;
39349 + pax_open_kernel();
39350 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39351
39352 - abyss_netdev_ops.ndo_open = abyss_open;
39353 - abyss_netdev_ops.ndo_stop = abyss_close;
39354 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39355 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39356 + pax_close_kernel();
39357
39358 return pci_register_driver(&abyss_driver);
39359 }
39360 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39361 index 456f8bf..373e56d 100644
39362 --- a/drivers/net/tokenring/madgemc.c
39363 +++ b/drivers/net/tokenring/madgemc.c
39364 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39365
39366 static int __init madgemc_init (void)
39367 {
39368 - madgemc_netdev_ops = tms380tr_netdev_ops;
39369 - madgemc_netdev_ops.ndo_open = madgemc_open;
39370 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39371 + pax_open_kernel();
39372 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39373 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39374 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39375 + pax_close_kernel();
39376
39377 return mca_register_driver (&madgemc_driver);
39378 }
39379 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39380 index 16e8783..925bd49 100644
39381 --- a/drivers/net/tokenring/proteon.c
39382 +++ b/drivers/net/tokenring/proteon.c
39383 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39384 struct platform_device *pdev;
39385 int i, num = 0, err = 0;
39386
39387 - proteon_netdev_ops = tms380tr_netdev_ops;
39388 - proteon_netdev_ops.ndo_open = proteon_open;
39389 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39390 + pax_open_kernel();
39391 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39392 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39393 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39394 + pax_close_kernel();
39395
39396 err = platform_driver_register(&proteon_driver);
39397 if (err)
39398 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39399 index 46db5c5..37c1536 100644
39400 --- a/drivers/net/tokenring/skisa.c
39401 +++ b/drivers/net/tokenring/skisa.c
39402 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39403 struct platform_device *pdev;
39404 int i, num = 0, err = 0;
39405
39406 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39407 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39408 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39409 + pax_open_kernel();
39410 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39411 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39412 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39413 + pax_close_kernel();
39414
39415 err = platform_driver_register(&sk_isa_driver);
39416 if (err)
39417 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39418 index 74e5ba4..5cf6bc9 100644
39419 --- a/drivers/net/tulip/de2104x.c
39420 +++ b/drivers/net/tulip/de2104x.c
39421 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39422 struct de_srom_info_leaf *il;
39423 void *bufp;
39424
39425 + pax_track_stack();
39426 +
39427 /* download entire eeprom */
39428 for (i = 0; i < DE_EEPROM_WORDS; i++)
39429 ((__le16 *)ee_data)[i] =
39430 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39431 index a8349b7..90f9dfe 100644
39432 --- a/drivers/net/tulip/de4x5.c
39433 +++ b/drivers/net/tulip/de4x5.c
39434 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39435 for (i=0; i<ETH_ALEN; i++) {
39436 tmp.addr[i] = dev->dev_addr[i];
39437 }
39438 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39439 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39440 break;
39441
39442 case DE4X5_SET_HWADDR: /* Set the hardware address */
39443 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39444 spin_lock_irqsave(&lp->lock, flags);
39445 memcpy(&statbuf, &lp->pktStats, ioc->len);
39446 spin_unlock_irqrestore(&lp->lock, flags);
39447 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39448 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39449 return -EFAULT;
39450 break;
39451 }
39452 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39453 index 391acd3..56d11cd 100644
39454 --- a/drivers/net/tulip/eeprom.c
39455 +++ b/drivers/net/tulip/eeprom.c
39456 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39457 {NULL}};
39458
39459
39460 -static const char *block_name[] __devinitdata = {
39461 +static const char *block_name[] __devinitconst = {
39462 "21140 non-MII",
39463 "21140 MII PHY",
39464 "21142 Serial PHY",
39465 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39466 index b38d3b7..b1cff23 100644
39467 --- a/drivers/net/tulip/winbond-840.c
39468 +++ b/drivers/net/tulip/winbond-840.c
39469 @@ -235,7 +235,7 @@ struct pci_id_info {
39470 int drv_flags; /* Driver use, intended as capability flags. */
39471 };
39472
39473 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39474 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39475 { /* Sometime a Level-One switch card. */
39476 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39477 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39478 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39479 index f450bc9..2b747c8 100644
39480 --- a/drivers/net/usb/hso.c
39481 +++ b/drivers/net/usb/hso.c
39482 @@ -71,7 +71,7 @@
39483 #include <asm/byteorder.h>
39484 #include <linux/serial_core.h>
39485 #include <linux/serial.h>
39486 -
39487 +#include <asm/local.h>
39488
39489 #define DRIVER_VERSION "1.2"
39490 #define MOD_AUTHOR "Option Wireless"
39491 @@ -258,7 +258,7 @@ struct hso_serial {
39492
39493 /* from usb_serial_port */
39494 struct tty_struct *tty;
39495 - int open_count;
39496 + local_t open_count;
39497 spinlock_t serial_lock;
39498
39499 int (*write_data) (struct hso_serial *serial);
39500 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39501 struct urb *urb;
39502
39503 urb = serial->rx_urb[0];
39504 - if (serial->open_count > 0) {
39505 + if (local_read(&serial->open_count) > 0) {
39506 count = put_rxbuf_data(urb, serial);
39507 if (count == -1)
39508 return;
39509 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39510 DUMP1(urb->transfer_buffer, urb->actual_length);
39511
39512 /* Anyone listening? */
39513 - if (serial->open_count == 0)
39514 + if (local_read(&serial->open_count) == 0)
39515 return;
39516
39517 if (status == 0) {
39518 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39519 spin_unlock_irq(&serial->serial_lock);
39520
39521 /* check for port already opened, if not set the termios */
39522 - serial->open_count++;
39523 - if (serial->open_count == 1) {
39524 + if (local_inc_return(&serial->open_count) == 1) {
39525 tty->low_latency = 1;
39526 serial->rx_state = RX_IDLE;
39527 /* Force default termio settings */
39528 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39529 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39530 if (result) {
39531 hso_stop_serial_device(serial->parent);
39532 - serial->open_count--;
39533 + local_dec(&serial->open_count);
39534 kref_put(&serial->parent->ref, hso_serial_ref_free);
39535 }
39536 } else {
39537 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39538
39539 /* reset the rts and dtr */
39540 /* do the actual close */
39541 - serial->open_count--;
39542 + local_dec(&serial->open_count);
39543
39544 - if (serial->open_count <= 0) {
39545 - serial->open_count = 0;
39546 + if (local_read(&serial->open_count) <= 0) {
39547 + local_set(&serial->open_count, 0);
39548 spin_lock_irq(&serial->serial_lock);
39549 if (serial->tty == tty) {
39550 serial->tty->driver_data = NULL;
39551 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39552
39553 /* the actual setup */
39554 spin_lock_irqsave(&serial->serial_lock, flags);
39555 - if (serial->open_count)
39556 + if (local_read(&serial->open_count))
39557 _hso_serial_set_termios(tty, old);
39558 else
39559 tty->termios = old;
39560 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39561 /* Start all serial ports */
39562 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39563 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39564 - if (dev2ser(serial_table[i])->open_count) {
39565 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39566 result =
39567 hso_start_serial_device(serial_table[i], GFP_NOIO);
39568 hso_kick_transmit(dev2ser(serial_table[i]));
39569 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39570 index 3e94f0c..ffdd926 100644
39571 --- a/drivers/net/vxge/vxge-config.h
39572 +++ b/drivers/net/vxge/vxge-config.h
39573 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39574 void (*link_down)(struct __vxge_hw_device *devh);
39575 void (*crit_err)(struct __vxge_hw_device *devh,
39576 enum vxge_hw_event type, u64 ext_data);
39577 -};
39578 +} __no_const;
39579
39580 /*
39581 * struct __vxge_hw_blockpool_entry - Block private data structure
39582 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39583 index 068d7a9..35293de 100644
39584 --- a/drivers/net/vxge/vxge-main.c
39585 +++ b/drivers/net/vxge/vxge-main.c
39586 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39587 struct sk_buff *completed[NR_SKB_COMPLETED];
39588 int more;
39589
39590 + pax_track_stack();
39591 +
39592 do {
39593 more = 0;
39594 skb_ptr = completed;
39595 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39596 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39597 int index;
39598
39599 + pax_track_stack();
39600 +
39601 /*
39602 * Filling
39603 * - itable with bucket numbers
39604 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39605 index 461742b..81be42e 100644
39606 --- a/drivers/net/vxge/vxge-traffic.h
39607 +++ b/drivers/net/vxge/vxge-traffic.h
39608 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39609 struct vxge_hw_mempool_dma *dma_object,
39610 u32 index,
39611 u32 is_last);
39612 -};
39613 +} __no_const;
39614
39615 void
39616 __vxge_hw_mempool_destroy(
39617 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39618 index cd8cb95..4153b79 100644
39619 --- a/drivers/net/wan/cycx_x25.c
39620 +++ b/drivers/net/wan/cycx_x25.c
39621 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39622 unsigned char hex[1024],
39623 * phex = hex;
39624
39625 + pax_track_stack();
39626 +
39627 if (len >= (sizeof(hex) / 2))
39628 len = (sizeof(hex) / 2) - 1;
39629
39630 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39631 index aa9248f..a4e3c3b 100644
39632 --- a/drivers/net/wan/hdlc_x25.c
39633 +++ b/drivers/net/wan/hdlc_x25.c
39634 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39635
39636 static int x25_open(struct net_device *dev)
39637 {
39638 - struct lapb_register_struct cb;
39639 + static struct lapb_register_struct cb = {
39640 + .connect_confirmation = x25_connected,
39641 + .connect_indication = x25_connected,
39642 + .disconnect_confirmation = x25_disconnected,
39643 + .disconnect_indication = x25_disconnected,
39644 + .data_indication = x25_data_indication,
39645 + .data_transmit = x25_data_transmit
39646 + };
39647 int result;
39648
39649 - cb.connect_confirmation = x25_connected;
39650 - cb.connect_indication = x25_connected;
39651 - cb.disconnect_confirmation = x25_disconnected;
39652 - cb.disconnect_indication = x25_disconnected;
39653 - cb.data_indication = x25_data_indication;
39654 - cb.data_transmit = x25_data_transmit;
39655 -
39656 result = lapb_register(dev, &cb);
39657 if (result != LAPB_OK)
39658 return result;
39659 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39660 index 5ad287c..783b020 100644
39661 --- a/drivers/net/wimax/i2400m/usb-fw.c
39662 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39663 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39664 int do_autopm = 1;
39665 DECLARE_COMPLETION_ONSTACK(notif_completion);
39666
39667 + pax_track_stack();
39668 +
39669 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39670 i2400m, ack, ack_size);
39671 BUG_ON(_ack == i2400m->bm_ack_buf);
39672 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39673 index 6c26840..62c97c3 100644
39674 --- a/drivers/net/wireless/airo.c
39675 +++ b/drivers/net/wireless/airo.c
39676 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39677 BSSListElement * loop_net;
39678 BSSListElement * tmp_net;
39679
39680 + pax_track_stack();
39681 +
39682 /* Blow away current list of scan results */
39683 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39684 list_move_tail (&loop_net->list, &ai->network_free_list);
39685 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39686 WepKeyRid wkr;
39687 int rc;
39688
39689 + pax_track_stack();
39690 +
39691 memset( &mySsid, 0, sizeof( mySsid ) );
39692 kfree (ai->flash);
39693 ai->flash = NULL;
39694 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39695 __le32 *vals = stats.vals;
39696 int len;
39697
39698 + pax_track_stack();
39699 +
39700 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39701 return -ENOMEM;
39702 data = (struct proc_data *)file->private_data;
39703 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39704 /* If doLoseSync is not 1, we won't do a Lose Sync */
39705 int doLoseSync = -1;
39706
39707 + pax_track_stack();
39708 +
39709 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39710 return -ENOMEM;
39711 data = (struct proc_data *)file->private_data;
39712 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39713 int i;
39714 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39715
39716 + pax_track_stack();
39717 +
39718 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39719 if (!qual)
39720 return -ENOMEM;
39721 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39722 CapabilityRid cap_rid;
39723 __le32 *vals = stats_rid.vals;
39724
39725 + pax_track_stack();
39726 +
39727 /* Get stats out of the card */
39728 clear_bit(JOB_WSTATS, &local->jobs);
39729 if (local->power.event) {
39730 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39731 index 747508c..82e965d 100644
39732 --- a/drivers/net/wireless/ath/ath5k/debug.c
39733 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39734 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39735 unsigned int v;
39736 u64 tsf;
39737
39738 + pax_track_stack();
39739 +
39740 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39741 len += snprintf(buf+len, sizeof(buf)-len,
39742 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39743 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39744 unsigned int len = 0;
39745 unsigned int i;
39746
39747 + pax_track_stack();
39748 +
39749 len += snprintf(buf+len, sizeof(buf)-len,
39750 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39751
39752 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39753 index 2be4c22..593b1eb 100644
39754 --- a/drivers/net/wireless/ath/ath9k/debug.c
39755 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39756 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39757 char buf[512];
39758 unsigned int len = 0;
39759
39760 + pax_track_stack();
39761 +
39762 len += snprintf(buf + len, sizeof(buf) - len,
39763 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39764 len += snprintf(buf + len, sizeof(buf) - len,
39765 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39766 int i;
39767 u8 addr[ETH_ALEN];
39768
39769 + pax_track_stack();
39770 +
39771 len += snprintf(buf + len, sizeof(buf) - len,
39772 "primary: %s (%s chan=%d ht=%d)\n",
39773 wiphy_name(sc->pri_wiphy->hw->wiphy),
39774 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39775 index 80b19a4..dab3a45 100644
39776 --- a/drivers/net/wireless/b43/debugfs.c
39777 +++ b/drivers/net/wireless/b43/debugfs.c
39778 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39779 struct b43_debugfs_fops {
39780 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39781 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39782 - struct file_operations fops;
39783 + const struct file_operations fops;
39784 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39785 size_t file_struct_offset;
39786 };
39787 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39788 index 1f85ac5..c99b4b4 100644
39789 --- a/drivers/net/wireless/b43legacy/debugfs.c
39790 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39791 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39792 struct b43legacy_debugfs_fops {
39793 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39794 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39795 - struct file_operations fops;
39796 + const struct file_operations fops;
39797 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39798 size_t file_struct_offset;
39799 /* Take wl->irq_lock before calling read/write? */
39800 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39801 index 43102bf..3b569c3 100644
39802 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39803 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39804 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39805 int err;
39806 DECLARE_SSID_BUF(ssid);
39807
39808 + pax_track_stack();
39809 +
39810 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39811
39812 if (ssid_len)
39813 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39814 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39815 int err;
39816
39817 + pax_track_stack();
39818 +
39819 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39820 idx, keylen, len);
39821
39822 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39823 index 282b1f7..169f0cf 100644
39824 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39825 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39826 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39827 unsigned long flags;
39828 DECLARE_SSID_BUF(ssid);
39829
39830 + pax_track_stack();
39831 +
39832 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39833 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39834 print_ssid(ssid, info_element->data, info_element->len),
39835 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39836 index 950267a..80d5fd2 100644
39837 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39838 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39839 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39840 },
39841 };
39842
39843 -static struct iwl_ops iwl1000_ops = {
39844 +static const struct iwl_ops iwl1000_ops = {
39845 .ucode = &iwl5000_ucode,
39846 .lib = &iwl1000_lib,
39847 .hcmd = &iwl5000_hcmd,
39848 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39849 index 56bfcc3..b348020 100644
39850 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39851 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39852 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39853 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39854 };
39855
39856 -static struct iwl_ops iwl3945_ops = {
39857 +static const struct iwl_ops iwl3945_ops = {
39858 .ucode = &iwl3945_ucode,
39859 .lib = &iwl3945_lib,
39860 .hcmd = &iwl3945_hcmd,
39861 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39862 index 585b8d4..e142963 100644
39863 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39864 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39865 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39866 },
39867 };
39868
39869 -static struct iwl_ops iwl4965_ops = {
39870 +static const struct iwl_ops iwl4965_ops = {
39871 .ucode = &iwl4965_ucode,
39872 .lib = &iwl4965_lib,
39873 .hcmd = &iwl4965_hcmd,
39874 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39875 index 1f423f2..e37c192 100644
39876 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39877 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39878 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39879 },
39880 };
39881
39882 -struct iwl_ops iwl5000_ops = {
39883 +const struct iwl_ops iwl5000_ops = {
39884 .ucode = &iwl5000_ucode,
39885 .lib = &iwl5000_lib,
39886 .hcmd = &iwl5000_hcmd,
39887 .utils = &iwl5000_hcmd_utils,
39888 };
39889
39890 -static struct iwl_ops iwl5150_ops = {
39891 +static const struct iwl_ops iwl5150_ops = {
39892 .ucode = &iwl5000_ucode,
39893 .lib = &iwl5150_lib,
39894 .hcmd = &iwl5000_hcmd,
39895 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39896 index 1473452..f07d5e1 100644
39897 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39898 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39899 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39900 .calc_rssi = iwl5000_calc_rssi,
39901 };
39902
39903 -static struct iwl_ops iwl6000_ops = {
39904 +static const struct iwl_ops iwl6000_ops = {
39905 .ucode = &iwl5000_ucode,
39906 .lib = &iwl6000_lib,
39907 .hcmd = &iwl5000_hcmd,
39908 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39909 index 1a3dfa2..b3e0a61 100644
39910 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39911 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39912 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39913 u8 active_index = 0;
39914 s32 tpt = 0;
39915
39916 + pax_track_stack();
39917 +
39918 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39919
39920 if (!ieee80211_is_data(hdr->frame_control) ||
39921 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39922 u8 valid_tx_ant = 0;
39923 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39924
39925 + pax_track_stack();
39926 +
39927 /* Override starting rate (index 0) if needed for debug purposes */
39928 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39929
39930 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39931 index 0e56d78..6a3c107 100644
39932 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39933 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39934 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39935 if (iwl_debug_level & IWL_DL_INFO)
39936 dev_printk(KERN_DEBUG, &(pdev->dev),
39937 "Disabling hw_scan\n");
39938 - iwl_hw_ops.hw_scan = NULL;
39939 + pax_open_kernel();
39940 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39941 + pax_close_kernel();
39942 }
39943
39944 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39945 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39946 index cbc6290..eb323d7 100644
39947 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39948 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39949 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39950 #endif
39951
39952 #else
39953 -#define IWL_DEBUG(__priv, level, fmt, args...)
39954 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39955 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39956 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39957 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39958 void *p, u32 len)
39959 {}
39960 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39961 index a198bcf..8e68233 100644
39962 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39963 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39964 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39965 int pos = 0;
39966 const size_t bufsz = sizeof(buf);
39967
39968 + pax_track_stack();
39969 +
39970 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39971 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39972 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39973 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39974 const size_t bufsz = sizeof(buf);
39975 ssize_t ret;
39976
39977 + pax_track_stack();
39978 +
39979 for (i = 0; i < AC_NUM; i++) {
39980 pos += scnprintf(buf + pos, bufsz - pos,
39981 "\tcw_min\tcw_max\taifsn\ttxop\n");
39982 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39983 index 3539ea4..b174bfa 100644
39984 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39985 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39986 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39987
39988 /* shared structures from iwl-5000.c */
39989 extern struct iwl_mod_params iwl50_mod_params;
39990 -extern struct iwl_ops iwl5000_ops;
39991 +extern const struct iwl_ops iwl5000_ops;
39992 extern struct iwl_ucode_ops iwl5000_ucode;
39993 extern struct iwl_lib_ops iwl5000_lib;
39994 extern struct iwl_hcmd_ops iwl5000_hcmd;
39995 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39996 index 619590d..69235ee 100644
39997 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39998 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39999 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
40000 */
40001 if (iwl3945_mod_params.disable_hw_scan) {
40002 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
40003 - iwl3945_hw_ops.hw_scan = NULL;
40004 + pax_open_kernel();
40005 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
40006 + pax_close_kernel();
40007 }
40008
40009
40010 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
40011 index 1465379..fe4d78b 100644
40012 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
40013 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
40014 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
40015 int buf_len = 512;
40016 size_t len = 0;
40017
40018 + pax_track_stack();
40019 +
40020 if (*ppos != 0)
40021 return 0;
40022 if (count < sizeof(buf))
40023 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
40024 index 893a55c..7f66a50 100644
40025 --- a/drivers/net/wireless/libertas/debugfs.c
40026 +++ b/drivers/net/wireless/libertas/debugfs.c
40027 @@ -708,7 +708,7 @@ out_unlock:
40028 struct lbs_debugfs_files {
40029 const char *name;
40030 int perm;
40031 - struct file_operations fops;
40032 + const struct file_operations fops;
40033 };
40034
40035 static const struct lbs_debugfs_files debugfs_files[] = {
40036 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40037 index 2ecbedb..42704f0 100644
40038 --- a/drivers/net/wireless/rndis_wlan.c
40039 +++ b/drivers/net/wireless/rndis_wlan.c
40040 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40041
40042 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
40043
40044 - if (rts_threshold < 0 || rts_threshold > 2347)
40045 + if (rts_threshold > 2347)
40046 rts_threshold = 2347;
40047
40048 tmp = cpu_to_le32(rts_threshold);
40049 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40050 index 334ccd6..47f8944 100644
40051 --- a/drivers/oprofile/buffer_sync.c
40052 +++ b/drivers/oprofile/buffer_sync.c
40053 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40054 if (cookie == NO_COOKIE)
40055 offset = pc;
40056 if (cookie == INVALID_COOKIE) {
40057 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40058 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40059 offset = pc;
40060 }
40061 if (cookie != last_cookie) {
40062 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40063 /* add userspace sample */
40064
40065 if (!mm) {
40066 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
40067 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40068 return 0;
40069 }
40070
40071 cookie = lookup_dcookie(mm, s->eip, &offset);
40072
40073 if (cookie == INVALID_COOKIE) {
40074 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40075 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40076 return 0;
40077 }
40078
40079 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40080 /* ignore backtraces if failed to add a sample */
40081 if (state == sb_bt_start) {
40082 state = sb_bt_ignore;
40083 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40084 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40085 }
40086 }
40087 release_mm(mm);
40088 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40089 index 5df60a6..72f5c1c 100644
40090 --- a/drivers/oprofile/event_buffer.c
40091 +++ b/drivers/oprofile/event_buffer.c
40092 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40093 }
40094
40095 if (buffer_pos == buffer_size) {
40096 - atomic_inc(&oprofile_stats.event_lost_overflow);
40097 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40098 return;
40099 }
40100
40101 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40102 index dc8a042..fe5f315 100644
40103 --- a/drivers/oprofile/oprof.c
40104 +++ b/drivers/oprofile/oprof.c
40105 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40106 if (oprofile_ops.switch_events())
40107 return;
40108
40109 - atomic_inc(&oprofile_stats.multiplex_counter);
40110 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40111 start_switch_worker();
40112 }
40113
40114 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40115 index 61689e8..387f7f8 100644
40116 --- a/drivers/oprofile/oprofile_stats.c
40117 +++ b/drivers/oprofile/oprofile_stats.c
40118 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40119 cpu_buf->sample_invalid_eip = 0;
40120 }
40121
40122 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40123 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40124 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
40125 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40126 - atomic_set(&oprofile_stats.multiplex_counter, 0);
40127 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40128 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40129 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40130 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40131 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40132 }
40133
40134
40135 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40136 index 0b54e46..a37c527 100644
40137 --- a/drivers/oprofile/oprofile_stats.h
40138 +++ b/drivers/oprofile/oprofile_stats.h
40139 @@ -13,11 +13,11 @@
40140 #include <asm/atomic.h>
40141
40142 struct oprofile_stat_struct {
40143 - atomic_t sample_lost_no_mm;
40144 - atomic_t sample_lost_no_mapping;
40145 - atomic_t bt_lost_no_mapping;
40146 - atomic_t event_lost_overflow;
40147 - atomic_t multiplex_counter;
40148 + atomic_unchecked_t sample_lost_no_mm;
40149 + atomic_unchecked_t sample_lost_no_mapping;
40150 + atomic_unchecked_t bt_lost_no_mapping;
40151 + atomic_unchecked_t event_lost_overflow;
40152 + atomic_unchecked_t multiplex_counter;
40153 };
40154
40155 extern struct oprofile_stat_struct oprofile_stats;
40156 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40157 index 2766a6d..80c77e2 100644
40158 --- a/drivers/oprofile/oprofilefs.c
40159 +++ b/drivers/oprofile/oprofilefs.c
40160 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40161
40162
40163 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40164 - char const *name, atomic_t *val)
40165 + char const *name, atomic_unchecked_t *val)
40166 {
40167 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40168 &atomic_ro_fops, 0444);
40169 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40170 index 13a64bc..ad62835 100644
40171 --- a/drivers/parisc/pdc_stable.c
40172 +++ b/drivers/parisc/pdc_stable.c
40173 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40174 return ret;
40175 }
40176
40177 -static struct sysfs_ops pdcspath_attr_ops = {
40178 +static const struct sysfs_ops pdcspath_attr_ops = {
40179 .show = pdcspath_attr_show,
40180 .store = pdcspath_attr_store,
40181 };
40182 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40183 index 8eefe56..40751a7 100644
40184 --- a/drivers/parport/procfs.c
40185 +++ b/drivers/parport/procfs.c
40186 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40187
40188 *ppos += len;
40189
40190 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40191 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40192 }
40193
40194 #ifdef CONFIG_PARPORT_1284
40195 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40196
40197 *ppos += len;
40198
40199 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40200 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40201 }
40202 #endif /* IEEE1284.3 support. */
40203
40204 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40205 index 73e7d8e..c80f3d2 100644
40206 --- a/drivers/pci/hotplug/acpiphp_glue.c
40207 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40208 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40209 }
40210
40211
40212 -static struct acpi_dock_ops acpiphp_dock_ops = {
40213 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40214 .handler = handle_hotplug_event_func,
40215 };
40216
40217 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40218 index 9fff878..ad0ad53 100644
40219 --- a/drivers/pci/hotplug/cpci_hotplug.h
40220 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40221 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40222 int (*hardware_test) (struct slot* slot, u32 value);
40223 u8 (*get_power) (struct slot* slot);
40224 int (*set_power) (struct slot* slot, int value);
40225 -};
40226 +} __no_const;
40227
40228 struct cpci_hp_controller {
40229 unsigned int irq;
40230 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40231 index 76ba8a1..20ca857 100644
40232 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40233 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40234 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40235
40236 void compaq_nvram_init (void __iomem *rom_start)
40237 {
40238 +
40239 +#ifndef CONFIG_PAX_KERNEXEC
40240 if (rom_start) {
40241 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40242 }
40243 +#endif
40244 +
40245 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40246
40247 /* initialize our int15 lock */
40248 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40249 index 6151389..0a894ef 100644
40250 --- a/drivers/pci/hotplug/fakephp.c
40251 +++ b/drivers/pci/hotplug/fakephp.c
40252 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40253 }
40254
40255 static struct kobj_type legacy_ktype = {
40256 - .sysfs_ops = &(struct sysfs_ops){
40257 + .sysfs_ops = &(const struct sysfs_ops){
40258 .store = legacy_store, .show = legacy_show
40259 },
40260 .release = &legacy_release,
40261 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40262 index 5b680df..fe05b7e 100644
40263 --- a/drivers/pci/intel-iommu.c
40264 +++ b/drivers/pci/intel-iommu.c
40265 @@ -2643,7 +2643,7 @@ error:
40266 return 0;
40267 }
40268
40269 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40270 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40271 unsigned long offset, size_t size,
40272 enum dma_data_direction dir,
40273 struct dma_attrs *attrs)
40274 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40275 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40276 }
40277
40278 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40279 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40280 size_t size, enum dma_data_direction dir,
40281 struct dma_attrs *attrs)
40282 {
40283 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40284 }
40285 }
40286
40287 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40288 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40289 dma_addr_t *dma_handle, gfp_t flags)
40290 {
40291 void *vaddr;
40292 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40293 return NULL;
40294 }
40295
40296 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40297 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40298 dma_addr_t dma_handle)
40299 {
40300 int order;
40301 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40302 free_pages((unsigned long)vaddr, order);
40303 }
40304
40305 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40306 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40307 int nelems, enum dma_data_direction dir,
40308 struct dma_attrs *attrs)
40309 {
40310 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40311 return nelems;
40312 }
40313
40314 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40315 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40316 enum dma_data_direction dir, struct dma_attrs *attrs)
40317 {
40318 int i;
40319 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40320 return nelems;
40321 }
40322
40323 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40324 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40325 {
40326 return !dma_addr;
40327 }
40328
40329 -struct dma_map_ops intel_dma_ops = {
40330 +const struct dma_map_ops intel_dma_ops = {
40331 .alloc_coherent = intel_alloc_coherent,
40332 .free_coherent = intel_free_coherent,
40333 .map_sg = intel_map_sg,
40334 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40335 index 5b7056c..607bc94 100644
40336 --- a/drivers/pci/pcie/aspm.c
40337 +++ b/drivers/pci/pcie/aspm.c
40338 @@ -27,9 +27,9 @@
40339 #define MODULE_PARAM_PREFIX "pcie_aspm."
40340
40341 /* Note: those are not register definitions */
40342 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40343 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40344 -#define ASPM_STATE_L1 (4) /* L1 state */
40345 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40346 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40347 +#define ASPM_STATE_L1 (4U) /* L1 state */
40348 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40349 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40350
40351 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40352 index 8105e32..ca10419 100644
40353 --- a/drivers/pci/probe.c
40354 +++ b/drivers/pci/probe.c
40355 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40356 return ret;
40357 }
40358
40359 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40360 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40361 struct device_attribute *attr,
40362 char *buf)
40363 {
40364 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40365 }
40366
40367 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40368 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40369 struct device_attribute *attr,
40370 char *buf)
40371 {
40372 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40373 index a03ad8c..024b0da 100644
40374 --- a/drivers/pci/proc.c
40375 +++ b/drivers/pci/proc.c
40376 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40377 static int __init pci_proc_init(void)
40378 {
40379 struct pci_dev *dev = NULL;
40380 +
40381 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40382 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40383 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40384 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40385 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40386 +#endif
40387 +#else
40388 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40389 +#endif
40390 proc_create("devices", 0, proc_bus_pci_dir,
40391 &proc_bus_pci_dev_operations);
40392 proc_initialized = 1;
40393 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40394 index 8c02b6c..5584d8e 100644
40395 --- a/drivers/pci/slot.c
40396 +++ b/drivers/pci/slot.c
40397 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40398 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40399 }
40400
40401 -static struct sysfs_ops pci_slot_sysfs_ops = {
40402 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40403 .show = pci_slot_attr_show,
40404 .store = pci_slot_attr_store,
40405 };
40406 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40407 index 30cf71d2..50938f1 100644
40408 --- a/drivers/pcmcia/pcmcia_ioctl.c
40409 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40410 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40411 return -EFAULT;
40412 }
40413 }
40414 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40415 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40416 if (!buf)
40417 return -ENOMEM;
40418
40419 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40420 index 52183c4..b224c69 100644
40421 --- a/drivers/platform/x86/acer-wmi.c
40422 +++ b/drivers/platform/x86/acer-wmi.c
40423 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40424 return 0;
40425 }
40426
40427 -static struct backlight_ops acer_bl_ops = {
40428 +static const struct backlight_ops acer_bl_ops = {
40429 .get_brightness = read_brightness,
40430 .update_status = update_bl_status,
40431 };
40432 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40433 index 767cb61..a87380b 100644
40434 --- a/drivers/platform/x86/asus-laptop.c
40435 +++ b/drivers/platform/x86/asus-laptop.c
40436 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40437 */
40438 static int read_brightness(struct backlight_device *bd);
40439 static int update_bl_status(struct backlight_device *bd);
40440 -static struct backlight_ops asusbl_ops = {
40441 +static const struct backlight_ops asusbl_ops = {
40442 .get_brightness = read_brightness,
40443 .update_status = update_bl_status,
40444 };
40445 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40446 index d66c07a..a4abaac 100644
40447 --- a/drivers/platform/x86/asus_acpi.c
40448 +++ b/drivers/platform/x86/asus_acpi.c
40449 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40450 return 0;
40451 }
40452
40453 -static struct backlight_ops asus_backlight_data = {
40454 +static const struct backlight_ops asus_backlight_data = {
40455 .get_brightness = read_brightness,
40456 .update_status = set_brightness_status,
40457 };
40458 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40459 index 11003bb..550ff1b 100644
40460 --- a/drivers/platform/x86/compal-laptop.c
40461 +++ b/drivers/platform/x86/compal-laptop.c
40462 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40463 return set_lcd_level(b->props.brightness);
40464 }
40465
40466 -static struct backlight_ops compalbl_ops = {
40467 +static const struct backlight_ops compalbl_ops = {
40468 .get_brightness = bl_get_brightness,
40469 .update_status = bl_update_status,
40470 };
40471 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40472 index 07a74da..9dc99fa 100644
40473 --- a/drivers/platform/x86/dell-laptop.c
40474 +++ b/drivers/platform/x86/dell-laptop.c
40475 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40476 return buffer.output[1];
40477 }
40478
40479 -static struct backlight_ops dell_ops = {
40480 +static const struct backlight_ops dell_ops = {
40481 .get_brightness = dell_get_intensity,
40482 .update_status = dell_send_intensity,
40483 };
40484 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40485 index c533b1c..5c81f22 100644
40486 --- a/drivers/platform/x86/eeepc-laptop.c
40487 +++ b/drivers/platform/x86/eeepc-laptop.c
40488 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40489 */
40490 static int read_brightness(struct backlight_device *bd);
40491 static int update_bl_status(struct backlight_device *bd);
40492 -static struct backlight_ops eeepcbl_ops = {
40493 +static const struct backlight_ops eeepcbl_ops = {
40494 .get_brightness = read_brightness,
40495 .update_status = update_bl_status,
40496 };
40497 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40498 index bcd4ba8..a249b35 100644
40499 --- a/drivers/platform/x86/fujitsu-laptop.c
40500 +++ b/drivers/platform/x86/fujitsu-laptop.c
40501 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40502 return ret;
40503 }
40504
40505 -static struct backlight_ops fujitsubl_ops = {
40506 +static const struct backlight_ops fujitsubl_ops = {
40507 .get_brightness = bl_get_brightness,
40508 .update_status = bl_update_status,
40509 };
40510 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40511 index 759763d..1093ba2 100644
40512 --- a/drivers/platform/x86/msi-laptop.c
40513 +++ b/drivers/platform/x86/msi-laptop.c
40514 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40515 return set_lcd_level(b->props.brightness);
40516 }
40517
40518 -static struct backlight_ops msibl_ops = {
40519 +static const struct backlight_ops msibl_ops = {
40520 .get_brightness = bl_get_brightness,
40521 .update_status = bl_update_status,
40522 };
40523 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40524 index fe7cf01..9012d8d 100644
40525 --- a/drivers/platform/x86/panasonic-laptop.c
40526 +++ b/drivers/platform/x86/panasonic-laptop.c
40527 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40528 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40529 }
40530
40531 -static struct backlight_ops pcc_backlight_ops = {
40532 +static const struct backlight_ops pcc_backlight_ops = {
40533 .get_brightness = bl_get,
40534 .update_status = bl_set_status,
40535 };
40536 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40537 index a2a742c..b37e25e 100644
40538 --- a/drivers/platform/x86/sony-laptop.c
40539 +++ b/drivers/platform/x86/sony-laptop.c
40540 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40541 }
40542
40543 static struct backlight_device *sony_backlight_device;
40544 -static struct backlight_ops sony_backlight_ops = {
40545 +static const struct backlight_ops sony_backlight_ops = {
40546 .update_status = sony_backlight_update_status,
40547 .get_brightness = sony_backlight_get_brightness,
40548 };
40549 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40550 index 68271ae..5e8fb10 100644
40551 --- a/drivers/platform/x86/thinkpad_acpi.c
40552 +++ b/drivers/platform/x86/thinkpad_acpi.c
40553 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40554 return 0;
40555 }
40556
40557 -void static hotkey_mask_warn_incomplete_mask(void)
40558 +static void hotkey_mask_warn_incomplete_mask(void)
40559 {
40560 /* log only what the user can fix... */
40561 const u32 wantedmask = hotkey_driver_mask &
40562 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40563 BACKLIGHT_UPDATE_HOTKEY);
40564 }
40565
40566 -static struct backlight_ops ibm_backlight_data = {
40567 +static const struct backlight_ops ibm_backlight_data = {
40568 .get_brightness = brightness_get,
40569 .update_status = brightness_update_status,
40570 };
40571 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40572 index 51c0a8b..0786629 100644
40573 --- a/drivers/platform/x86/toshiba_acpi.c
40574 +++ b/drivers/platform/x86/toshiba_acpi.c
40575 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40576 return AE_OK;
40577 }
40578
40579 -static struct backlight_ops toshiba_backlight_data = {
40580 +static const struct backlight_ops toshiba_backlight_data = {
40581 .get_brightness = get_lcd,
40582 .update_status = set_lcd_status,
40583 };
40584 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40585 index fc83783c..cf370d7 100644
40586 --- a/drivers/pnp/pnpbios/bioscalls.c
40587 +++ b/drivers/pnp/pnpbios/bioscalls.c
40588 @@ -60,7 +60,7 @@ do { \
40589 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40590 } while(0)
40591
40592 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40593 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40594 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40595
40596 /*
40597 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40598
40599 cpu = get_cpu();
40600 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40601 +
40602 + pax_open_kernel();
40603 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40604 + pax_close_kernel();
40605
40606 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40607 spin_lock_irqsave(&pnp_bios_lock, flags);
40608 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40609 :"memory");
40610 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40611
40612 + pax_open_kernel();
40613 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40614 + pax_close_kernel();
40615 +
40616 put_cpu();
40617
40618 /* If we get here and this is set then the PnP BIOS faulted on us. */
40619 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40620 return status;
40621 }
40622
40623 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40624 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40625 {
40626 int i;
40627
40628 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40629 pnp_bios_callpoint.offset = header->fields.pm16offset;
40630 pnp_bios_callpoint.segment = PNP_CS16;
40631
40632 + pax_open_kernel();
40633 +
40634 for_each_possible_cpu(i) {
40635 struct desc_struct *gdt = get_cpu_gdt_table(i);
40636 if (!gdt)
40637 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40638 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40639 (unsigned long)__va(header->fields.pm16dseg));
40640 }
40641 +
40642 + pax_close_kernel();
40643 }
40644 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40645 index ba97654..66b99d4 100644
40646 --- a/drivers/pnp/resource.c
40647 +++ b/drivers/pnp/resource.c
40648 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40649 return 1;
40650
40651 /* check if the resource is valid */
40652 - if (*irq < 0 || *irq > 15)
40653 + if (*irq > 15)
40654 return 0;
40655
40656 /* check if the resource is reserved */
40657 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40658 return 1;
40659
40660 /* check if the resource is valid */
40661 - if (*dma < 0 || *dma == 4 || *dma > 7)
40662 + if (*dma == 4 || *dma > 7)
40663 return 0;
40664
40665 /* check if the resource is reserved */
40666 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40667 index 62bb981..24a2dc9 100644
40668 --- a/drivers/power/bq27x00_battery.c
40669 +++ b/drivers/power/bq27x00_battery.c
40670 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40671 struct bq27x00_access_methods {
40672 int (*read)(u8 reg, int *rt_value, int b_single,
40673 struct bq27x00_device_info *di);
40674 -};
40675 +} __no_const;
40676
40677 struct bq27x00_device_info {
40678 struct device *dev;
40679 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40680 index 62227cd..b5b538b 100644
40681 --- a/drivers/rtc/rtc-dev.c
40682 +++ b/drivers/rtc/rtc-dev.c
40683 @@ -14,6 +14,7 @@
40684 #include <linux/module.h>
40685 #include <linux/rtc.h>
40686 #include <linux/sched.h>
40687 +#include <linux/grsecurity.h>
40688 #include "rtc-core.h"
40689
40690 static dev_t rtc_devt;
40691 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40692 if (copy_from_user(&tm, uarg, sizeof(tm)))
40693 return -EFAULT;
40694
40695 + gr_log_timechange();
40696 +
40697 return rtc_set_time(rtc, &tm);
40698
40699 case RTC_PIE_ON:
40700 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40701 index 968e3c7..fbc637a 100644
40702 --- a/drivers/s390/cio/qdio_perf.c
40703 +++ b/drivers/s390/cio/qdio_perf.c
40704 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40705 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40706 {
40707 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40708 - (long)atomic_long_read(&perf_stats.qdio_int));
40709 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40710 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40711 - (long)atomic_long_read(&perf_stats.pci_int));
40712 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40713 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40714 - (long)atomic_long_read(&perf_stats.thin_int));
40715 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40716 seq_printf(m, "\n");
40717 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40718 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40719 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40720 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40721 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40722 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40723 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40724 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40725 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40726 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40727 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40728 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40729 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40730 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40731 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40732 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40733 seq_printf(m, "\n");
40734 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40735 - (long)atomic_long_read(&perf_stats.siga_in));
40736 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40737 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40738 - (long)atomic_long_read(&perf_stats.siga_out));
40739 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40740 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40741 - (long)atomic_long_read(&perf_stats.siga_sync));
40742 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40743 seq_printf(m, "\n");
40744 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40745 - (long)atomic_long_read(&perf_stats.inbound_handler));
40746 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40747 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40748 - (long)atomic_long_read(&perf_stats.outbound_handler));
40749 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40750 seq_printf(m, "\n");
40751 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40752 - (long)atomic_long_read(&perf_stats.fast_requeue));
40753 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40754 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40755 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40756 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40757 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40758 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40759 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40760 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40761 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40762 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40763 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40764 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40765 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40766 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40767 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40768 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40769 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40770 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40771 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40772 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40773 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40774 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40775 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40776 seq_printf(m, "\n");
40777 return 0;
40778 }
40779 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40780 index ff4504c..b3604c3 100644
40781 --- a/drivers/s390/cio/qdio_perf.h
40782 +++ b/drivers/s390/cio/qdio_perf.h
40783 @@ -13,46 +13,46 @@
40784
40785 struct qdio_perf_stats {
40786 /* interrupt handler calls */
40787 - atomic_long_t qdio_int;
40788 - atomic_long_t pci_int;
40789 - atomic_long_t thin_int;
40790 + atomic_long_unchecked_t qdio_int;
40791 + atomic_long_unchecked_t pci_int;
40792 + atomic_long_unchecked_t thin_int;
40793
40794 /* tasklet runs */
40795 - atomic_long_t tasklet_inbound;
40796 - atomic_long_t tasklet_outbound;
40797 - atomic_long_t tasklet_thinint;
40798 - atomic_long_t tasklet_thinint_loop;
40799 - atomic_long_t thinint_inbound;
40800 - atomic_long_t thinint_inbound_loop;
40801 - atomic_long_t thinint_inbound_loop2;
40802 + atomic_long_unchecked_t tasklet_inbound;
40803 + atomic_long_unchecked_t tasklet_outbound;
40804 + atomic_long_unchecked_t tasklet_thinint;
40805 + atomic_long_unchecked_t tasklet_thinint_loop;
40806 + atomic_long_unchecked_t thinint_inbound;
40807 + atomic_long_unchecked_t thinint_inbound_loop;
40808 + atomic_long_unchecked_t thinint_inbound_loop2;
40809
40810 /* signal adapter calls */
40811 - atomic_long_t siga_out;
40812 - atomic_long_t siga_in;
40813 - atomic_long_t siga_sync;
40814 + atomic_long_unchecked_t siga_out;
40815 + atomic_long_unchecked_t siga_in;
40816 + atomic_long_unchecked_t siga_sync;
40817
40818 /* misc */
40819 - atomic_long_t inbound_handler;
40820 - atomic_long_t outbound_handler;
40821 - atomic_long_t fast_requeue;
40822 - atomic_long_t outbound_target_full;
40823 + atomic_long_unchecked_t inbound_handler;
40824 + atomic_long_unchecked_t outbound_handler;
40825 + atomic_long_unchecked_t fast_requeue;
40826 + atomic_long_unchecked_t outbound_target_full;
40827
40828 /* for debugging */
40829 - atomic_long_t debug_tl_out_timer;
40830 - atomic_long_t debug_stop_polling;
40831 - atomic_long_t debug_eqbs_all;
40832 - atomic_long_t debug_eqbs_incomplete;
40833 - atomic_long_t debug_sqbs_all;
40834 - atomic_long_t debug_sqbs_incomplete;
40835 + atomic_long_unchecked_t debug_tl_out_timer;
40836 + atomic_long_unchecked_t debug_stop_polling;
40837 + atomic_long_unchecked_t debug_eqbs_all;
40838 + atomic_long_unchecked_t debug_eqbs_incomplete;
40839 + atomic_long_unchecked_t debug_sqbs_all;
40840 + atomic_long_unchecked_t debug_sqbs_incomplete;
40841 };
40842
40843 extern struct qdio_perf_stats perf_stats;
40844 extern int qdio_performance_stats;
40845
40846 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40847 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40848 {
40849 if (qdio_performance_stats)
40850 - atomic_long_inc(count);
40851 + atomic_long_inc_unchecked(count);
40852 }
40853
40854 int qdio_setup_perf_stats(void);
40855 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40856 index 1ddcf40..a85f062 100644
40857 --- a/drivers/scsi/BusLogic.c
40858 +++ b/drivers/scsi/BusLogic.c
40859 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40860 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40861 *PrototypeHostAdapter)
40862 {
40863 + pax_track_stack();
40864 +
40865 /*
40866 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40867 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40868 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40869 index cdbdec9..b7d560b 100644
40870 --- a/drivers/scsi/aacraid/aacraid.h
40871 +++ b/drivers/scsi/aacraid/aacraid.h
40872 @@ -471,7 +471,7 @@ struct adapter_ops
40873 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40874 /* Administrative operations */
40875 int (*adapter_comm)(struct aac_dev * dev, int comm);
40876 -};
40877 +} __no_const;
40878
40879 /*
40880 * Define which interrupt handler needs to be installed
40881 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40882 index a5b8e7b..a6a0e43 100644
40883 --- a/drivers/scsi/aacraid/commctrl.c
40884 +++ b/drivers/scsi/aacraid/commctrl.c
40885 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40886 u32 actual_fibsize64, actual_fibsize = 0;
40887 int i;
40888
40889 + pax_track_stack();
40890
40891 if (dev->in_reset) {
40892 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40893 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40894 index 9b97c3e..f099725 100644
40895 --- a/drivers/scsi/aacraid/linit.c
40896 +++ b/drivers/scsi/aacraid/linit.c
40897 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40898 #elif defined(__devinitconst)
40899 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40900 #else
40901 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40902 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40903 #endif
40904 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40905 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40906 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40907 index 996f722..9127845 100644
40908 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40909 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40910 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40911 flash_error_table[i].reason);
40912 }
40913
40914 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40915 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40916 asd_show_update_bios, asd_store_update_bios);
40917
40918 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40919 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40920 .lldd_control_phy = asd_control_phy,
40921 };
40922
40923 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40924 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40925 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40926 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40927 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40928 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40929 index 58efd4b..cb48dc7 100644
40930 --- a/drivers/scsi/bfa/bfa_ioc.h
40931 +++ b/drivers/scsi/bfa/bfa_ioc.h
40932 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40933 bfa_ioc_disable_cbfn_t disable_cbfn;
40934 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40935 bfa_ioc_reset_cbfn_t reset_cbfn;
40936 -};
40937 +} __no_const;
40938
40939 /**
40940 * Heartbeat failure notification queue element.
40941 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40942 index 7ad177e..5503586 100644
40943 --- a/drivers/scsi/bfa/bfa_iocfc.h
40944 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40945 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40946 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40947 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40948 u32 *nvecs, u32 *maxvec);
40949 -};
40950 +} __no_const;
40951 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40952
40953 struct bfa_iocfc_s {
40954 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40955 index 4967643..cbec06b 100644
40956 --- a/drivers/scsi/dpt_i2o.c
40957 +++ b/drivers/scsi/dpt_i2o.c
40958 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40959 dma_addr_t addr;
40960 ulong flags = 0;
40961
40962 + pax_track_stack();
40963 +
40964 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40965 // get user msg size in u32s
40966 if(get_user(size, &user_msg[0])){
40967 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40968 s32 rcode;
40969 dma_addr_t addr;
40970
40971 + pax_track_stack();
40972 +
40973 memset(msg, 0 , sizeof(msg));
40974 len = scsi_bufflen(cmd);
40975 direction = 0x00000000;
40976 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40977 index c7076ce..e20c67c 100644
40978 --- a/drivers/scsi/eata.c
40979 +++ b/drivers/scsi/eata.c
40980 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40981 struct hostdata *ha;
40982 char name[16];
40983
40984 + pax_track_stack();
40985 +
40986 sprintf(name, "%s%d", driver_name, j);
40987
40988 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40989 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40990 index 11ae5c9..891daec 100644
40991 --- a/drivers/scsi/fcoe/libfcoe.c
40992 +++ b/drivers/scsi/fcoe/libfcoe.c
40993 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40994 size_t rlen;
40995 size_t dlen;
40996
40997 + pax_track_stack();
40998 +
40999 fiph = (struct fip_header *)skb->data;
41000 sub = fiph->fip_subcode;
41001 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
41002 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
41003 index 71c7bbe..e93088a 100644
41004 --- a/drivers/scsi/fnic/fnic_main.c
41005 +++ b/drivers/scsi/fnic/fnic_main.c
41006 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
41007 /* Start local port initiatialization */
41008
41009 lp->link_up = 0;
41010 - lp->tt = fnic_transport_template;
41011 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
41012
41013 lp->max_retry_count = fnic->config.flogi_retries;
41014 lp->max_rport_retry_count = fnic->config.plogi_retries;
41015 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
41016 index bb96d74..9ec3ce4 100644
41017 --- a/drivers/scsi/gdth.c
41018 +++ b/drivers/scsi/gdth.c
41019 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
41020 ulong flags;
41021 gdth_ha_str *ha;
41022
41023 + pax_track_stack();
41024 +
41025 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
41026 return -EFAULT;
41027 ha = gdth_find_ha(ldrv.ionode);
41028 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
41029 gdth_ha_str *ha;
41030 int rval;
41031
41032 + pax_track_stack();
41033 +
41034 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
41035 res.number >= MAX_HDRIVES)
41036 return -EFAULT;
41037 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
41038 gdth_ha_str *ha;
41039 int rval;
41040
41041 + pax_track_stack();
41042 +
41043 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
41044 return -EFAULT;
41045 ha = gdth_find_ha(gen.ionode);
41046 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
41047 int i;
41048 gdth_cmd_str gdtcmd;
41049 char cmnd[MAX_COMMAND_SIZE];
41050 +
41051 + pax_track_stack();
41052 +
41053 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
41054
41055 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
41056 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
41057 index 1258da3..20d8ae6 100644
41058 --- a/drivers/scsi/gdth_proc.c
41059 +++ b/drivers/scsi/gdth_proc.c
41060 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
41061 ulong64 paddr;
41062
41063 char cmnd[MAX_COMMAND_SIZE];
41064 +
41065 + pax_track_stack();
41066 +
41067 memset(cmnd, 0xff, 12);
41068 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
41069
41070 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41071 gdth_hget_str *phg;
41072 char cmnd[MAX_COMMAND_SIZE];
41073
41074 + pax_track_stack();
41075 +
41076 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41077 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41078 if (!gdtcmd || !estr)
41079 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41080 index d03a926..f324286 100644
41081 --- a/drivers/scsi/hosts.c
41082 +++ b/drivers/scsi/hosts.c
41083 @@ -40,7 +40,7 @@
41084 #include "scsi_logging.h"
41085
41086
41087 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
41088 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41089
41090
41091 static void scsi_host_cls_release(struct device *dev)
41092 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41093 * subtract one because we increment first then return, but we need to
41094 * know what the next host number was before increment
41095 */
41096 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41097 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41098 shost->dma_channel = 0xff;
41099
41100 /* These three are default values which can be overridden */
41101 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41102 index a601159..55e19d2 100644
41103 --- a/drivers/scsi/ipr.c
41104 +++ b/drivers/scsi/ipr.c
41105 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41106 return true;
41107 }
41108
41109 -static struct ata_port_operations ipr_sata_ops = {
41110 +static const struct ata_port_operations ipr_sata_ops = {
41111 .phy_reset = ipr_ata_phy_reset,
41112 .hardreset = ipr_sata_reset,
41113 .post_internal_cmd = ipr_ata_post_internal,
41114 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41115 index 4e49fbc..97907ff 100644
41116 --- a/drivers/scsi/ips.h
41117 +++ b/drivers/scsi/ips.h
41118 @@ -1027,7 +1027,7 @@ typedef struct {
41119 int (*intr)(struct ips_ha *);
41120 void (*enableint)(struct ips_ha *);
41121 uint32_t (*statupd)(struct ips_ha *);
41122 -} ips_hw_func_t;
41123 +} __no_const ips_hw_func_t;
41124
41125 typedef struct ips_ha {
41126 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41127 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41128 index c1c1574..a9c9348 100644
41129 --- a/drivers/scsi/libfc/fc_exch.c
41130 +++ b/drivers/scsi/libfc/fc_exch.c
41131 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
41132 * all together if not used XXX
41133 */
41134 struct {
41135 - atomic_t no_free_exch;
41136 - atomic_t no_free_exch_xid;
41137 - atomic_t xid_not_found;
41138 - atomic_t xid_busy;
41139 - atomic_t seq_not_found;
41140 - atomic_t non_bls_resp;
41141 + atomic_unchecked_t no_free_exch;
41142 + atomic_unchecked_t no_free_exch_xid;
41143 + atomic_unchecked_t xid_not_found;
41144 + atomic_unchecked_t xid_busy;
41145 + atomic_unchecked_t seq_not_found;
41146 + atomic_unchecked_t non_bls_resp;
41147 } stats;
41148 };
41149 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41150 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41151 /* allocate memory for exchange */
41152 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41153 if (!ep) {
41154 - atomic_inc(&mp->stats.no_free_exch);
41155 + atomic_inc_unchecked(&mp->stats.no_free_exch);
41156 goto out;
41157 }
41158 memset(ep, 0, sizeof(*ep));
41159 @@ -557,7 +557,7 @@ out:
41160 return ep;
41161 err:
41162 spin_unlock_bh(&pool->lock);
41163 - atomic_inc(&mp->stats.no_free_exch_xid);
41164 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41165 mempool_free(ep, mp->ep_pool);
41166 return NULL;
41167 }
41168 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41169 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41170 ep = fc_exch_find(mp, xid);
41171 if (!ep) {
41172 - atomic_inc(&mp->stats.xid_not_found);
41173 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41174 reject = FC_RJT_OX_ID;
41175 goto out;
41176 }
41177 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41178 ep = fc_exch_find(mp, xid);
41179 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41180 if (ep) {
41181 - atomic_inc(&mp->stats.xid_busy);
41182 + atomic_inc_unchecked(&mp->stats.xid_busy);
41183 reject = FC_RJT_RX_ID;
41184 goto rel;
41185 }
41186 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41187 }
41188 xid = ep->xid; /* get our XID */
41189 } else if (!ep) {
41190 - atomic_inc(&mp->stats.xid_not_found);
41191 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41192 reject = FC_RJT_RX_ID; /* XID not found */
41193 goto out;
41194 }
41195 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41196 } else {
41197 sp = &ep->seq;
41198 if (sp->id != fh->fh_seq_id) {
41199 - atomic_inc(&mp->stats.seq_not_found);
41200 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41201 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41202 goto rel;
41203 }
41204 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41205
41206 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41207 if (!ep) {
41208 - atomic_inc(&mp->stats.xid_not_found);
41209 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41210 goto out;
41211 }
41212 if (ep->esb_stat & ESB_ST_COMPLETE) {
41213 - atomic_inc(&mp->stats.xid_not_found);
41214 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41215 goto out;
41216 }
41217 if (ep->rxid == FC_XID_UNKNOWN)
41218 ep->rxid = ntohs(fh->fh_rx_id);
41219 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41220 - atomic_inc(&mp->stats.xid_not_found);
41221 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41222 goto rel;
41223 }
41224 if (ep->did != ntoh24(fh->fh_s_id) &&
41225 ep->did != FC_FID_FLOGI) {
41226 - atomic_inc(&mp->stats.xid_not_found);
41227 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41228 goto rel;
41229 }
41230 sof = fr_sof(fp);
41231 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41232 } else {
41233 sp = &ep->seq;
41234 if (sp->id != fh->fh_seq_id) {
41235 - atomic_inc(&mp->stats.seq_not_found);
41236 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41237 goto rel;
41238 }
41239 }
41240 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41241 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41242
41243 if (!sp)
41244 - atomic_inc(&mp->stats.xid_not_found);
41245 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41246 else
41247 - atomic_inc(&mp->stats.non_bls_resp);
41248 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41249
41250 fc_frame_free(fp);
41251 }
41252 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41253 index 0ee989f..a582241 100644
41254 --- a/drivers/scsi/libsas/sas_ata.c
41255 +++ b/drivers/scsi/libsas/sas_ata.c
41256 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41257 }
41258 }
41259
41260 -static struct ata_port_operations sas_sata_ops = {
41261 +static const struct ata_port_operations sas_sata_ops = {
41262 .phy_reset = sas_ata_phy_reset,
41263 .post_internal_cmd = sas_ata_post_internal,
41264 .qc_defer = ata_std_qc_defer,
41265 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41266 index aa10f79..5cc79e4 100644
41267 --- a/drivers/scsi/lpfc/lpfc.h
41268 +++ b/drivers/scsi/lpfc/lpfc.h
41269 @@ -400,7 +400,7 @@ struct lpfc_vport {
41270 struct dentry *debug_nodelist;
41271 struct dentry *vport_debugfs_root;
41272 struct lpfc_debugfs_trc *disc_trc;
41273 - atomic_t disc_trc_cnt;
41274 + atomic_unchecked_t disc_trc_cnt;
41275 #endif
41276 uint8_t stat_data_enabled;
41277 uint8_t stat_data_blocked;
41278 @@ -725,8 +725,8 @@ struct lpfc_hba {
41279 struct timer_list fabric_block_timer;
41280 unsigned long bit_flags;
41281 #define FABRIC_COMANDS_BLOCKED 0
41282 - atomic_t num_rsrc_err;
41283 - atomic_t num_cmd_success;
41284 + atomic_unchecked_t num_rsrc_err;
41285 + atomic_unchecked_t num_cmd_success;
41286 unsigned long last_rsrc_error_time;
41287 unsigned long last_ramp_down_time;
41288 unsigned long last_ramp_up_time;
41289 @@ -740,7 +740,7 @@ struct lpfc_hba {
41290 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41291 struct dentry *debug_slow_ring_trc;
41292 struct lpfc_debugfs_trc *slow_ring_trc;
41293 - atomic_t slow_ring_trc_cnt;
41294 + atomic_unchecked_t slow_ring_trc_cnt;
41295 #endif
41296
41297 /* Used for deferred freeing of ELS data buffers */
41298 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41299 index 8d0f0de..7c77a62 100644
41300 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41301 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41302 @@ -124,7 +124,7 @@ struct lpfc_debug {
41303 int len;
41304 };
41305
41306 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41307 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41308 static unsigned long lpfc_debugfs_start_time = 0L;
41309
41310 /**
41311 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41312 lpfc_debugfs_enable = 0;
41313
41314 len = 0;
41315 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41316 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41317 (lpfc_debugfs_max_disc_trc - 1);
41318 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41319 dtp = vport->disc_trc + i;
41320 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41321 lpfc_debugfs_enable = 0;
41322
41323 len = 0;
41324 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41325 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41326 (lpfc_debugfs_max_slow_ring_trc - 1);
41327 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41328 dtp = phba->slow_ring_trc + i;
41329 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41330 uint32_t *ptr;
41331 char buffer[1024];
41332
41333 + pax_track_stack();
41334 +
41335 off = 0;
41336 spin_lock_irq(&phba->hbalock);
41337
41338 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41339 !vport || !vport->disc_trc)
41340 return;
41341
41342 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41343 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41344 (lpfc_debugfs_max_disc_trc - 1);
41345 dtp = vport->disc_trc + index;
41346 dtp->fmt = fmt;
41347 dtp->data1 = data1;
41348 dtp->data2 = data2;
41349 dtp->data3 = data3;
41350 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41351 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41352 dtp->jif = jiffies;
41353 #endif
41354 return;
41355 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41356 !phba || !phba->slow_ring_trc)
41357 return;
41358
41359 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41360 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41361 (lpfc_debugfs_max_slow_ring_trc - 1);
41362 dtp = phba->slow_ring_trc + index;
41363 dtp->fmt = fmt;
41364 dtp->data1 = data1;
41365 dtp->data2 = data2;
41366 dtp->data3 = data3;
41367 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41368 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41369 dtp->jif = jiffies;
41370 #endif
41371 return;
41372 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41373 "slow_ring buffer\n");
41374 goto debug_failed;
41375 }
41376 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41377 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41378 memset(phba->slow_ring_trc, 0,
41379 (sizeof(struct lpfc_debugfs_trc) *
41380 lpfc_debugfs_max_slow_ring_trc));
41381 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41382 "buffer\n");
41383 goto debug_failed;
41384 }
41385 - atomic_set(&vport->disc_trc_cnt, 0);
41386 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41387
41388 snprintf(name, sizeof(name), "discovery_trace");
41389 vport->debug_disc_trc =
41390 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41391 index 549bc7d..8189dbb 100644
41392 --- a/drivers/scsi/lpfc/lpfc_init.c
41393 +++ b/drivers/scsi/lpfc/lpfc_init.c
41394 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41395 printk(LPFC_COPYRIGHT "\n");
41396
41397 if (lpfc_enable_npiv) {
41398 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41399 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41400 + pax_open_kernel();
41401 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41402 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41403 + pax_close_kernel();
41404 }
41405 lpfc_transport_template =
41406 fc_attach_transport(&lpfc_transport_functions);
41407 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41408 index c88f59f..ff2a42f 100644
41409 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41410 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41411 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41412 uint32_t evt_posted;
41413
41414 spin_lock_irqsave(&phba->hbalock, flags);
41415 - atomic_inc(&phba->num_rsrc_err);
41416 + atomic_inc_unchecked(&phba->num_rsrc_err);
41417 phba->last_rsrc_error_time = jiffies;
41418
41419 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41420 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41421 unsigned long flags;
41422 struct lpfc_hba *phba = vport->phba;
41423 uint32_t evt_posted;
41424 - atomic_inc(&phba->num_cmd_success);
41425 + atomic_inc_unchecked(&phba->num_cmd_success);
41426
41427 if (vport->cfg_lun_queue_depth <= queue_depth)
41428 return;
41429 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41430 int i;
41431 struct lpfc_rport_data *rdata;
41432
41433 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41434 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41435 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41436 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41437
41438 vports = lpfc_create_vport_work_array(phba);
41439 if (vports != NULL)
41440 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41441 }
41442 }
41443 lpfc_destroy_vport_work_array(phba, vports);
41444 - atomic_set(&phba->num_rsrc_err, 0);
41445 - atomic_set(&phba->num_cmd_success, 0);
41446 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41447 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41448 }
41449
41450 /**
41451 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41452 }
41453 }
41454 lpfc_destroy_vport_work_array(phba, vports);
41455 - atomic_set(&phba->num_rsrc_err, 0);
41456 - atomic_set(&phba->num_cmd_success, 0);
41457 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41458 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41459 }
41460
41461 /**
41462 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41463 index 234f0b7..3020aea 100644
41464 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41465 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41466 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41467 int rval;
41468 int i;
41469
41470 + pax_track_stack();
41471 +
41472 // Allocate memory for the base list of scb for management module.
41473 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41474
41475 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41476 index 7a117c1..ee01e9e 100644
41477 --- a/drivers/scsi/osd/osd_initiator.c
41478 +++ b/drivers/scsi/osd/osd_initiator.c
41479 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41480 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41481 int ret;
41482
41483 + pax_track_stack();
41484 +
41485 or = osd_start_request(od, GFP_KERNEL);
41486 if (!or)
41487 return -ENOMEM;
41488 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41489 index 9ab8c86..9425ad3 100644
41490 --- a/drivers/scsi/pmcraid.c
41491 +++ b/drivers/scsi/pmcraid.c
41492 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41493 res->scsi_dev = scsi_dev;
41494 scsi_dev->hostdata = res;
41495 res->change_detected = 0;
41496 - atomic_set(&res->read_failures, 0);
41497 - atomic_set(&res->write_failures, 0);
41498 + atomic_set_unchecked(&res->read_failures, 0);
41499 + atomic_set_unchecked(&res->write_failures, 0);
41500 rc = 0;
41501 }
41502 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41503 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41504
41505 /* If this was a SCSI read/write command keep count of errors */
41506 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41507 - atomic_inc(&res->read_failures);
41508 + atomic_inc_unchecked(&res->read_failures);
41509 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41510 - atomic_inc(&res->write_failures);
41511 + atomic_inc_unchecked(&res->write_failures);
41512
41513 if (!RES_IS_GSCSI(res->cfg_entry) &&
41514 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41515 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41516
41517 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41518 /* add resources only after host is added into system */
41519 - if (!atomic_read(&pinstance->expose_resources))
41520 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41521 return;
41522
41523 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41524 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41525 init_waitqueue_head(&pinstance->reset_wait_q);
41526
41527 atomic_set(&pinstance->outstanding_cmds, 0);
41528 - atomic_set(&pinstance->expose_resources, 0);
41529 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41530
41531 INIT_LIST_HEAD(&pinstance->free_res_q);
41532 INIT_LIST_HEAD(&pinstance->used_res_q);
41533 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41534 /* Schedule worker thread to handle CCN and take care of adding and
41535 * removing devices to OS
41536 */
41537 - atomic_set(&pinstance->expose_resources, 1);
41538 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41539 schedule_work(&pinstance->worker_q);
41540 return rc;
41541
41542 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41543 index 3441b3f..6cbe8f7 100644
41544 --- a/drivers/scsi/pmcraid.h
41545 +++ b/drivers/scsi/pmcraid.h
41546 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41547 atomic_t outstanding_cmds;
41548
41549 /* should add/delete resources to mid-layer now ?*/
41550 - atomic_t expose_resources;
41551 + atomic_unchecked_t expose_resources;
41552
41553 /* Tasklet to handle deferred processing */
41554 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41555 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41556 struct list_head queue; /* link to "to be exposed" resources */
41557 struct pmcraid_config_table_entry cfg_entry;
41558 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41559 - atomic_t read_failures; /* count of failed READ commands */
41560 - atomic_t write_failures; /* count of failed WRITE commands */
41561 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41562 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41563
41564 /* To indicate add/delete/modify during CCN */
41565 u8 change_detected;
41566 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41567 index 2150618..7034215 100644
41568 --- a/drivers/scsi/qla2xxx/qla_def.h
41569 +++ b/drivers/scsi/qla2xxx/qla_def.h
41570 @@ -2089,7 +2089,7 @@ struct isp_operations {
41571
41572 int (*get_flash_version) (struct scsi_qla_host *, void *);
41573 int (*start_scsi) (srb_t *);
41574 -};
41575 +} __no_const;
41576
41577 /* MSI-X Support *************************************************************/
41578
41579 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41580 index 81b5f29..2ae1fad 100644
41581 --- a/drivers/scsi/qla4xxx/ql4_def.h
41582 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41583 @@ -240,7 +240,7 @@ struct ddb_entry {
41584 atomic_t retry_relogin_timer; /* Min Time between relogins
41585 * (4000 only) */
41586 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41587 - atomic_t relogin_retry_count; /* Num of times relogin has been
41588 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41589 * retried */
41590
41591 uint16_t port;
41592 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41593 index af8c323..515dd51 100644
41594 --- a/drivers/scsi/qla4xxx/ql4_init.c
41595 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41596 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41597 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41598 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41599 atomic_set(&ddb_entry->relogin_timer, 0);
41600 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41601 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41602 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41603 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41604 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41605 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41606 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41607 atomic_set(&ddb_entry->port_down_timer,
41608 ha->port_down_retry_count);
41609 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41610 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41611 atomic_set(&ddb_entry->relogin_timer, 0);
41612 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41613 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41614 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41615 index 83c8b5e..a82b348 100644
41616 --- a/drivers/scsi/qla4xxx/ql4_os.c
41617 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41618 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41619 ddb_entry->fw_ddb_device_state ==
41620 DDB_DS_SESSION_FAILED) {
41621 /* Reset retry relogin timer */
41622 - atomic_inc(&ddb_entry->relogin_retry_count);
41623 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41624 DEBUG2(printk("scsi%ld: index[%d] relogin"
41625 " timed out-retrying"
41626 " relogin (%d)\n",
41627 ha->host_no,
41628 ddb_entry->fw_ddb_index,
41629 - atomic_read(&ddb_entry->
41630 + atomic_read_unchecked(&ddb_entry->
41631 relogin_retry_count))
41632 );
41633 start_dpc++;
41634 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41635 index dd098ca..686ce01 100644
41636 --- a/drivers/scsi/scsi.c
41637 +++ b/drivers/scsi/scsi.c
41638 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41639 unsigned long timeout;
41640 int rtn = 0;
41641
41642 - atomic_inc(&cmd->device->iorequest_cnt);
41643 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41644
41645 /* check if the device is still usable */
41646 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41647 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41648 index bc3e363..e1a8e50 100644
41649 --- a/drivers/scsi/scsi_debug.c
41650 +++ b/drivers/scsi/scsi_debug.c
41651 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41652 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41653 unsigned char *cmd = (unsigned char *)scp->cmnd;
41654
41655 + pax_track_stack();
41656 +
41657 if ((errsts = check_readiness(scp, 1, devip)))
41658 return errsts;
41659 memset(arr, 0, sizeof(arr));
41660 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41661 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41662 unsigned char *cmd = (unsigned char *)scp->cmnd;
41663
41664 + pax_track_stack();
41665 +
41666 if ((errsts = check_readiness(scp, 1, devip)))
41667 return errsts;
41668 memset(arr, 0, sizeof(arr));
41669 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41670 index 8df12522..c4c1472 100644
41671 --- a/drivers/scsi/scsi_lib.c
41672 +++ b/drivers/scsi/scsi_lib.c
41673 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41674 shost = sdev->host;
41675 scsi_init_cmd_errh(cmd);
41676 cmd->result = DID_NO_CONNECT << 16;
41677 - atomic_inc(&cmd->device->iorequest_cnt);
41678 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41679
41680 /*
41681 * SCSI request completion path will do scsi_device_unbusy(),
41682 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41683 */
41684 cmd->serial_number = 0;
41685
41686 - atomic_inc(&cmd->device->iodone_cnt);
41687 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41688 if (cmd->result)
41689 - atomic_inc(&cmd->device->ioerr_cnt);
41690 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41691
41692 disposition = scsi_decide_disposition(cmd);
41693 if (disposition != SUCCESS &&
41694 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41695 index 91a93e0..eae0fe3 100644
41696 --- a/drivers/scsi/scsi_sysfs.c
41697 +++ b/drivers/scsi/scsi_sysfs.c
41698 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41699 char *buf) \
41700 { \
41701 struct scsi_device *sdev = to_scsi_device(dev); \
41702 - unsigned long long count = atomic_read(&sdev->field); \
41703 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41704 return snprintf(buf, 20, "0x%llx\n", count); \
41705 } \
41706 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41707 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41708 index 1030327..f91fd30 100644
41709 --- a/drivers/scsi/scsi_tgt_lib.c
41710 +++ b/drivers/scsi/scsi_tgt_lib.c
41711 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41712 int err;
41713
41714 dprintk("%lx %u\n", uaddr, len);
41715 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41716 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41717 if (err) {
41718 /*
41719 * TODO: need to fixup sg_tablesize, max_segment_size,
41720 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41721 index db02e31..1b42ea9 100644
41722 --- a/drivers/scsi/scsi_transport_fc.c
41723 +++ b/drivers/scsi/scsi_transport_fc.c
41724 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41725 * Netlink Infrastructure
41726 */
41727
41728 -static atomic_t fc_event_seq;
41729 +static atomic_unchecked_t fc_event_seq;
41730
41731 /**
41732 * fc_get_event_number - Obtain the next sequential FC event number
41733 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41734 u32
41735 fc_get_event_number(void)
41736 {
41737 - return atomic_add_return(1, &fc_event_seq);
41738 + return atomic_add_return_unchecked(1, &fc_event_seq);
41739 }
41740 EXPORT_SYMBOL(fc_get_event_number);
41741
41742 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41743 {
41744 int error;
41745
41746 - atomic_set(&fc_event_seq, 0);
41747 + atomic_set_unchecked(&fc_event_seq, 0);
41748
41749 error = transport_class_register(&fc_host_class);
41750 if (error)
41751 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41752 index de2f8c4..63c5278 100644
41753 --- a/drivers/scsi/scsi_transport_iscsi.c
41754 +++ b/drivers/scsi/scsi_transport_iscsi.c
41755 @@ -81,7 +81,7 @@ struct iscsi_internal {
41756 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41757 };
41758
41759 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41760 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41761 static struct workqueue_struct *iscsi_eh_timer_workq;
41762
41763 /*
41764 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41765 int err;
41766
41767 ihost = shost->shost_data;
41768 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41769 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41770
41771 if (id == ISCSI_MAX_TARGET) {
41772 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41773 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41774 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41775 ISCSI_TRANSPORT_VERSION);
41776
41777 - atomic_set(&iscsi_session_nr, 0);
41778 + atomic_set_unchecked(&iscsi_session_nr, 0);
41779
41780 err = class_register(&iscsi_transport_class);
41781 if (err)
41782 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41783 index 21a045e..ec89e03 100644
41784 --- a/drivers/scsi/scsi_transport_srp.c
41785 +++ b/drivers/scsi/scsi_transport_srp.c
41786 @@ -33,7 +33,7 @@
41787 #include "scsi_transport_srp_internal.h"
41788
41789 struct srp_host_attrs {
41790 - atomic_t next_port_id;
41791 + atomic_unchecked_t next_port_id;
41792 };
41793 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41794
41795 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41796 struct Scsi_Host *shost = dev_to_shost(dev);
41797 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41798
41799 - atomic_set(&srp_host->next_port_id, 0);
41800 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41801 return 0;
41802 }
41803
41804 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41805 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41806 rport->roles = ids->roles;
41807
41808 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41809 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41810 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41811
41812 transport_setup_device(&rport->dev);
41813 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41814 index 040f751..98a5ed2 100644
41815 --- a/drivers/scsi/sg.c
41816 +++ b/drivers/scsi/sg.c
41817 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41818 sdp->disk->disk_name,
41819 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41820 NULL,
41821 - (char *)arg);
41822 + (char __user *)arg);
41823 case BLKTRACESTART:
41824 return blk_trace_startstop(sdp->device->request_queue, 1);
41825 case BLKTRACESTOP:
41826 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41827 const struct file_operations * fops;
41828 };
41829
41830 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41831 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41832 {"allow_dio", &adio_fops},
41833 {"debug", &debug_fops},
41834 {"def_reserved_size", &dressz_fops},
41835 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41836 {
41837 int k, mask;
41838 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41839 - struct sg_proc_leaf * leaf;
41840 + const struct sg_proc_leaf * leaf;
41841
41842 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41843 if (!sg_proc_sgp)
41844 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41845 index c19ca5e..3eb5959 100644
41846 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41847 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41848 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41849 int do_iounmap = 0;
41850 int do_disable_device = 1;
41851
41852 + pax_track_stack();
41853 +
41854 memset(&sym_dev, 0, sizeof(sym_dev));
41855 memset(&nvram, 0, sizeof(nvram));
41856 sym_dev.pdev = pdev;
41857 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41858 index eadc1ab..2d81457 100644
41859 --- a/drivers/serial/kgdboc.c
41860 +++ b/drivers/serial/kgdboc.c
41861 @@ -18,7 +18,7 @@
41862
41863 #define MAX_CONFIG_LEN 40
41864
41865 -static struct kgdb_io kgdboc_io_ops;
41866 +static const struct kgdb_io kgdboc_io_ops;
41867
41868 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41869 static int configured = -1;
41870 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41871 module_put(THIS_MODULE);
41872 }
41873
41874 -static struct kgdb_io kgdboc_io_ops = {
41875 +static const struct kgdb_io kgdboc_io_ops = {
41876 .name = "kgdboc",
41877 .read_char = kgdboc_get_char,
41878 .write_char = kgdboc_put_char,
41879 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41880 index b76f246..7f41af7 100644
41881 --- a/drivers/spi/spi.c
41882 +++ b/drivers/spi/spi.c
41883 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41884 EXPORT_SYMBOL_GPL(spi_sync);
41885
41886 /* portable code must never pass more than 32 bytes */
41887 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41888 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41889
41890 static u8 *buf;
41891
41892 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41893 index b9b37ff..19dfa23 100644
41894 --- a/drivers/staging/android/binder.c
41895 +++ b/drivers/staging/android/binder.c
41896 @@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41897 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41898 }
41899
41900 -static struct vm_operations_struct binder_vm_ops = {
41901 +static const struct vm_operations_struct binder_vm_ops = {
41902 .open = binder_vma_open,
41903 .close = binder_vma_close,
41904 };
41905 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41906 index cda26bb..39fed3f 100644
41907 --- a/drivers/staging/b3dfg/b3dfg.c
41908 +++ b/drivers/staging/b3dfg/b3dfg.c
41909 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41910 return VM_FAULT_NOPAGE;
41911 }
41912
41913 -static struct vm_operations_struct b3dfg_vm_ops = {
41914 +static const struct vm_operations_struct b3dfg_vm_ops = {
41915 .fault = b3dfg_vma_fault,
41916 };
41917
41918 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41919 return r;
41920 }
41921
41922 -static struct file_operations b3dfg_fops = {
41923 +static const struct file_operations b3dfg_fops = {
41924 .owner = THIS_MODULE,
41925 .open = b3dfg_open,
41926 .release = b3dfg_release,
41927 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41928 index 908f25a..c9a579b 100644
41929 --- a/drivers/staging/comedi/comedi_fops.c
41930 +++ b/drivers/staging/comedi/comedi_fops.c
41931 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41932 mutex_unlock(&dev->mutex);
41933 }
41934
41935 -static struct vm_operations_struct comedi_vm_ops = {
41936 +static const struct vm_operations_struct comedi_vm_ops = {
41937 .close = comedi_unmap,
41938 };
41939
41940 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41941 index e55a0db..577b776 100644
41942 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41943 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41944 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41945 static dev_t adsp_devno;
41946 static struct class *adsp_class;
41947
41948 -static struct file_operations adsp_fops = {
41949 +static const struct file_operations adsp_fops = {
41950 .owner = THIS_MODULE,
41951 .open = adsp_open,
41952 .unlocked_ioctl = adsp_ioctl,
41953 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41954 index ad2390f..4116ee8 100644
41955 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41956 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41957 @@ -1022,7 +1022,7 @@ done:
41958 return rc;
41959 }
41960
41961 -static struct file_operations audio_aac_fops = {
41962 +static const struct file_operations audio_aac_fops = {
41963 .owner = THIS_MODULE,
41964 .open = audio_open,
41965 .release = audio_release,
41966 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41967 index cd818a5..870b37b 100644
41968 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41969 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41970 @@ -833,7 +833,7 @@ done:
41971 return rc;
41972 }
41973
41974 -static struct file_operations audio_amrnb_fops = {
41975 +static const struct file_operations audio_amrnb_fops = {
41976 .owner = THIS_MODULE,
41977 .open = audamrnb_open,
41978 .release = audamrnb_release,
41979 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41980 index 4b43e18..cedafda 100644
41981 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41982 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41983 @@ -805,7 +805,7 @@ dma_fail:
41984 return rc;
41985 }
41986
41987 -static struct file_operations audio_evrc_fops = {
41988 +static const struct file_operations audio_evrc_fops = {
41989 .owner = THIS_MODULE,
41990 .open = audevrc_open,
41991 .release = audevrc_release,
41992 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41993 index 3d950a2..9431118 100644
41994 --- a/drivers/staging/dream/qdsp5/audio_in.c
41995 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41996 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41997 return 0;
41998 }
41999
42000 -static struct file_operations audio_fops = {
42001 +static const struct file_operations audio_fops = {
42002 .owner = THIS_MODULE,
42003 .open = audio_in_open,
42004 .release = audio_in_release,
42005 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
42006 .unlocked_ioctl = audio_in_ioctl,
42007 };
42008
42009 -static struct file_operations audpre_fops = {
42010 +static const struct file_operations audpre_fops = {
42011 .owner = THIS_MODULE,
42012 .open = audpre_open,
42013 .unlocked_ioctl = audpre_ioctl,
42014 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
42015 index b95574f..286c2f4 100644
42016 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
42017 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
42018 @@ -941,7 +941,7 @@ done:
42019 return rc;
42020 }
42021
42022 -static struct file_operations audio_mp3_fops = {
42023 +static const struct file_operations audio_mp3_fops = {
42024 .owner = THIS_MODULE,
42025 .open = audio_open,
42026 .release = audio_release,
42027 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
42028 index d1adcf6..f8f9833 100644
42029 --- a/drivers/staging/dream/qdsp5/audio_out.c
42030 +++ b/drivers/staging/dream/qdsp5/audio_out.c
42031 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
42032 return 0;
42033 }
42034
42035 -static struct file_operations audio_fops = {
42036 +static const struct file_operations audio_fops = {
42037 .owner = THIS_MODULE,
42038 .open = audio_open,
42039 .release = audio_release,
42040 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
42041 .unlocked_ioctl = audio_ioctl,
42042 };
42043
42044 -static struct file_operations audpp_fops = {
42045 +static const struct file_operations audpp_fops = {
42046 .owner = THIS_MODULE,
42047 .open = audpp_open,
42048 .unlocked_ioctl = audpp_ioctl,
42049 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
42050 index f0f50e3..f6b9dbc 100644
42051 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
42052 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
42053 @@ -816,7 +816,7 @@ err:
42054 return rc;
42055 }
42056
42057 -static struct file_operations audio_qcelp_fops = {
42058 +static const struct file_operations audio_qcelp_fops = {
42059 .owner = THIS_MODULE,
42060 .open = audqcelp_open,
42061 .release = audqcelp_release,
42062 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
42063 index 037d7ff..5469ec3 100644
42064 --- a/drivers/staging/dream/qdsp5/snd.c
42065 +++ b/drivers/staging/dream/qdsp5/snd.c
42066 @@ -242,7 +242,7 @@ err:
42067 return rc;
42068 }
42069
42070 -static struct file_operations snd_fops = {
42071 +static const struct file_operations snd_fops = {
42072 .owner = THIS_MODULE,
42073 .open = snd_open,
42074 .release = snd_release,
42075 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42076 index d4e7d88..0ea632a 100644
42077 --- a/drivers/staging/dream/smd/smd_qmi.c
42078 +++ b/drivers/staging/dream/smd/smd_qmi.c
42079 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42080 return 0;
42081 }
42082
42083 -static struct file_operations qmi_fops = {
42084 +static const struct file_operations qmi_fops = {
42085 .owner = THIS_MODULE,
42086 .read = qmi_read,
42087 .write = qmi_write,
42088 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42089 index cd3910b..ff053d3 100644
42090 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42091 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42092 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42093 return rc;
42094 }
42095
42096 -static struct file_operations rpcrouter_server_fops = {
42097 +static const struct file_operations rpcrouter_server_fops = {
42098 .owner = THIS_MODULE,
42099 .open = rpcrouter_open,
42100 .release = rpcrouter_release,
42101 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42102 .unlocked_ioctl = rpcrouter_ioctl,
42103 };
42104
42105 -static struct file_operations rpcrouter_router_fops = {
42106 +static const struct file_operations rpcrouter_router_fops = {
42107 .owner = THIS_MODULE,
42108 .open = rpcrouter_open,
42109 .release = rpcrouter_release,
42110 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42111 index c24e4e0..07665be 100644
42112 --- a/drivers/staging/dst/dcore.c
42113 +++ b/drivers/staging/dst/dcore.c
42114 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42115 return 0;
42116 }
42117
42118 -static struct block_device_operations dst_blk_ops = {
42119 +static const struct block_device_operations dst_blk_ops = {
42120 .open = dst_bdev_open,
42121 .release = dst_bdev_release,
42122 .owner = THIS_MODULE,
42123 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42124 n->size = ctl->size;
42125
42126 atomic_set(&n->refcnt, 1);
42127 - atomic_long_set(&n->gen, 0);
42128 + atomic_long_set_unchecked(&n->gen, 0);
42129 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42130
42131 err = dst_node_sysfs_init(n);
42132 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42133 index 557d372..8d84422 100644
42134 --- a/drivers/staging/dst/trans.c
42135 +++ b/drivers/staging/dst/trans.c
42136 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42137 t->error = 0;
42138 t->retries = 0;
42139 atomic_set(&t->refcnt, 1);
42140 - t->gen = atomic_long_inc_return(&n->gen);
42141 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
42142
42143 t->enc = bio_data_dir(bio);
42144 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42145 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42146 index 94f7752..d051514 100644
42147 --- a/drivers/staging/et131x/et1310_tx.c
42148 +++ b/drivers/staging/et131x/et1310_tx.c
42149 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42150 struct net_device_stats *stats = &etdev->net_stats;
42151
42152 if (pMpTcb->Flags & fMP_DEST_BROAD)
42153 - atomic_inc(&etdev->Stats.brdcstxmt);
42154 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42155 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42156 - atomic_inc(&etdev->Stats.multixmt);
42157 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42158 else
42159 - atomic_inc(&etdev->Stats.unixmt);
42160 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42161
42162 if (pMpTcb->Packet) {
42163 stats->tx_bytes += pMpTcb->Packet->len;
42164 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42165 index 1dfe06f..f469b4d 100644
42166 --- a/drivers/staging/et131x/et131x_adapter.h
42167 +++ b/drivers/staging/et131x/et131x_adapter.h
42168 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42169 * operations
42170 */
42171 u32 unircv; /* # multicast packets received */
42172 - atomic_t unixmt; /* # multicast packets for Tx */
42173 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42174 u32 multircv; /* # multicast packets received */
42175 - atomic_t multixmt; /* # multicast packets for Tx */
42176 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42177 u32 brdcstrcv; /* # broadcast packets received */
42178 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42179 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42180 u32 norcvbuf; /* # Rx packets discarded */
42181 u32 noxmtbuf; /* # Tx packets discarded */
42182
42183 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42184 index 4bd353a..e28f455 100644
42185 --- a/drivers/staging/go7007/go7007-v4l2.c
42186 +++ b/drivers/staging/go7007/go7007-v4l2.c
42187 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42188 return 0;
42189 }
42190
42191 -static struct vm_operations_struct go7007_vm_ops = {
42192 +static const struct vm_operations_struct go7007_vm_ops = {
42193 .open = go7007_vm_open,
42194 .close = go7007_vm_close,
42195 .fault = go7007_vm_fault,
42196 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42197 index 366dc95..b974d87 100644
42198 --- a/drivers/staging/hv/Channel.c
42199 +++ b/drivers/staging/hv/Channel.c
42200 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42201
42202 DPRINT_ENTER(VMBUS);
42203
42204 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42205 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42206 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42207 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42208
42209 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42210 ASSERT(msgInfo != NULL);
42211 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42212 index b12237f..01ae28a 100644
42213 --- a/drivers/staging/hv/Hv.c
42214 +++ b/drivers/staging/hv/Hv.c
42215 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42216 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42217 u32 outputAddressHi = outputAddress >> 32;
42218 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42219 - volatile void *hypercallPage = gHvContext.HypercallPage;
42220 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42221
42222 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42223 Control, Input, Output);
42224 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42225 index d089bb1..2ebc158 100644
42226 --- a/drivers/staging/hv/VmbusApi.h
42227 +++ b/drivers/staging/hv/VmbusApi.h
42228 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42229 u32 *GpadlHandle);
42230 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42231 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42232 -};
42233 +} __no_const;
42234
42235 /* Base driver object */
42236 struct hv_driver {
42237 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42238 index 5a37cce..6ecc88c 100644
42239 --- a/drivers/staging/hv/VmbusPrivate.h
42240 +++ b/drivers/staging/hv/VmbusPrivate.h
42241 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42242 struct VMBUS_CONNECTION {
42243 enum VMBUS_CONNECT_STATE ConnectState;
42244
42245 - atomic_t NextGpadlHandle;
42246 + atomic_unchecked_t NextGpadlHandle;
42247
42248 /*
42249 * Represents channel interrupts. Each bit position represents a
42250 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42251 index 871a202..ca50ddf 100644
42252 --- a/drivers/staging/hv/blkvsc_drv.c
42253 +++ b/drivers/staging/hv/blkvsc_drv.c
42254 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42255 /* The one and only one */
42256 static struct blkvsc_driver_context g_blkvsc_drv;
42257
42258 -static struct block_device_operations block_ops = {
42259 +static const struct block_device_operations block_ops = {
42260 .owner = THIS_MODULE,
42261 .open = blkvsc_open,
42262 .release = blkvsc_release,
42263 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42264 index 6acc49a..fbc8d46 100644
42265 --- a/drivers/staging/hv/vmbus_drv.c
42266 +++ b/drivers/staging/hv/vmbus_drv.c
42267 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42268 to_device_context(root_device_obj);
42269 struct device_context *child_device_ctx =
42270 to_device_context(child_device_obj);
42271 - static atomic_t device_num = ATOMIC_INIT(0);
42272 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42273
42274 DPRINT_ENTER(VMBUS_DRV);
42275
42276 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42277
42278 /* Set the device name. Otherwise, device_register() will fail. */
42279 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42280 - atomic_inc_return(&device_num));
42281 + atomic_inc_return_unchecked(&device_num));
42282
42283 /* The new device belongs to this bus */
42284 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42285 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42286 index d926189..17b19fd 100644
42287 --- a/drivers/staging/iio/ring_generic.h
42288 +++ b/drivers/staging/iio/ring_generic.h
42289 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42290
42291 int (*is_enabled)(struct iio_ring_buffer *ring);
42292 int (*enable)(struct iio_ring_buffer *ring);
42293 -};
42294 +} __no_const;
42295
42296 /**
42297 * struct iio_ring_buffer - general ring buffer structure
42298 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42299 index 1b237b7..88c624e 100644
42300 --- a/drivers/staging/octeon/ethernet-rx.c
42301 +++ b/drivers/staging/octeon/ethernet-rx.c
42302 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42303 /* Increment RX stats for virtual ports */
42304 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42305 #ifdef CONFIG_64BIT
42306 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42307 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42308 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42309 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42310 #else
42311 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42312 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42313 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42314 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42315 #endif
42316 }
42317 netif_receive_skb(skb);
42318 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42319 dev->name);
42320 */
42321 #ifdef CONFIG_64BIT
42322 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42323 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42324 #else
42325 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42326 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42327 #endif
42328 dev_kfree_skb_irq(skb);
42329 }
42330 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42331 index 492c502..d9909f1 100644
42332 --- a/drivers/staging/octeon/ethernet.c
42333 +++ b/drivers/staging/octeon/ethernet.c
42334 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42335 * since the RX tasklet also increments it.
42336 */
42337 #ifdef CONFIG_64BIT
42338 - atomic64_add(rx_status.dropped_packets,
42339 - (atomic64_t *)&priv->stats.rx_dropped);
42340 + atomic64_add_unchecked(rx_status.dropped_packets,
42341 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42342 #else
42343 - atomic_add(rx_status.dropped_packets,
42344 - (atomic_t *)&priv->stats.rx_dropped);
42345 + atomic_add_unchecked(rx_status.dropped_packets,
42346 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42347 #endif
42348 }
42349
42350 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42351 index a35bd5d..28fff45 100644
42352 --- a/drivers/staging/otus/80211core/pub_zfi.h
42353 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42354 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42355 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42356
42357 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42358 -};
42359 +} __no_const;
42360
42361 extern void zfZeroMemory(u8_t* va, u16_t length);
42362 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42363 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42364 index c39a25f..696f5aa 100644
42365 --- a/drivers/staging/panel/panel.c
42366 +++ b/drivers/staging/panel/panel.c
42367 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42368 return 0;
42369 }
42370
42371 -static struct file_operations lcd_fops = {
42372 +static const struct file_operations lcd_fops = {
42373 .write = lcd_write,
42374 .open = lcd_open,
42375 .release = lcd_release,
42376 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42377 return 0;
42378 }
42379
42380 -static struct file_operations keypad_fops = {
42381 +static const struct file_operations keypad_fops = {
42382 .read = keypad_read, /* read */
42383 .open = keypad_open, /* open */
42384 .release = keypad_release, /* close */
42385 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42386 index 270ebcb..37e46af 100644
42387 --- a/drivers/staging/phison/phison.c
42388 +++ b/drivers/staging/phison/phison.c
42389 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42390 ATA_BMDMA_SHT(DRV_NAME),
42391 };
42392
42393 -static struct ata_port_operations phison_ops = {
42394 +static const struct ata_port_operations phison_ops = {
42395 .inherits = &ata_bmdma_port_ops,
42396 .prereset = phison_pre_reset,
42397 };
42398 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42399 index 2eb8e3d..57616a7 100644
42400 --- a/drivers/staging/poch/poch.c
42401 +++ b/drivers/staging/poch/poch.c
42402 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42403 return 0;
42404 }
42405
42406 -static struct file_operations poch_fops = {
42407 +static const struct file_operations poch_fops = {
42408 .owner = THIS_MODULE,
42409 .open = poch_open,
42410 .release = poch_release,
42411 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42412 index c94de31..19402bc 100644
42413 --- a/drivers/staging/pohmelfs/inode.c
42414 +++ b/drivers/staging/pohmelfs/inode.c
42415 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42416 mutex_init(&psb->mcache_lock);
42417 psb->mcache_root = RB_ROOT;
42418 psb->mcache_timeout = msecs_to_jiffies(5000);
42419 - atomic_long_set(&psb->mcache_gen, 0);
42420 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42421
42422 psb->trans_max_pages = 100;
42423
42424 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42425 INIT_LIST_HEAD(&psb->crypto_ready_list);
42426 INIT_LIST_HEAD(&psb->crypto_active_list);
42427
42428 - atomic_set(&psb->trans_gen, 1);
42429 + atomic_set_unchecked(&psb->trans_gen, 1);
42430 atomic_long_set(&psb->total_inodes, 0);
42431
42432 mutex_init(&psb->state_lock);
42433 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42434 index e22665c..a2a9390 100644
42435 --- a/drivers/staging/pohmelfs/mcache.c
42436 +++ b/drivers/staging/pohmelfs/mcache.c
42437 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42438 m->data = data;
42439 m->start = start;
42440 m->size = size;
42441 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42442 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42443
42444 mutex_lock(&psb->mcache_lock);
42445 err = pohmelfs_mcache_insert(psb, m);
42446 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42447 index 623a07d..4035c19 100644
42448 --- a/drivers/staging/pohmelfs/netfs.h
42449 +++ b/drivers/staging/pohmelfs/netfs.h
42450 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42451 struct pohmelfs_sb {
42452 struct rb_root mcache_root;
42453 struct mutex mcache_lock;
42454 - atomic_long_t mcache_gen;
42455 + atomic_long_unchecked_t mcache_gen;
42456 unsigned long mcache_timeout;
42457
42458 unsigned int idx;
42459
42460 unsigned int trans_retries;
42461
42462 - atomic_t trans_gen;
42463 + atomic_unchecked_t trans_gen;
42464
42465 unsigned int crypto_attached_size;
42466 unsigned int crypto_align_size;
42467 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42468 index 36a2535..0591bf4 100644
42469 --- a/drivers/staging/pohmelfs/trans.c
42470 +++ b/drivers/staging/pohmelfs/trans.c
42471 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42472 int err;
42473 struct netfs_cmd *cmd = t->iovec.iov_base;
42474
42475 - t->gen = atomic_inc_return(&psb->trans_gen);
42476 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42477
42478 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42479 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42480 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42481 index f890a16..509ece8 100644
42482 --- a/drivers/staging/sep/sep_driver.c
42483 +++ b/drivers/staging/sep/sep_driver.c
42484 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42485 static dev_t sep_devno;
42486
42487 /* the files operations structure of the driver */
42488 -static struct file_operations sep_file_operations = {
42489 +static const struct file_operations sep_file_operations = {
42490 .owner = THIS_MODULE,
42491 .ioctl = sep_ioctl,
42492 .poll = sep_poll,
42493 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42494 index 5e16bc3..7655b10 100644
42495 --- a/drivers/staging/usbip/usbip_common.h
42496 +++ b/drivers/staging/usbip/usbip_common.h
42497 @@ -374,7 +374,7 @@ struct usbip_device {
42498 void (*shutdown)(struct usbip_device *);
42499 void (*reset)(struct usbip_device *);
42500 void (*unusable)(struct usbip_device *);
42501 - } eh_ops;
42502 + } __no_const eh_ops;
42503 };
42504
42505
42506 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42507 index 57f7946..d9df23d 100644
42508 --- a/drivers/staging/usbip/vhci.h
42509 +++ b/drivers/staging/usbip/vhci.h
42510 @@ -92,7 +92,7 @@ struct vhci_hcd {
42511 unsigned resuming:1;
42512 unsigned long re_timeout;
42513
42514 - atomic_t seqnum;
42515 + atomic_unchecked_t seqnum;
42516
42517 /*
42518 * NOTE:
42519 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42520 index 20cd7db..c2693ff 100644
42521 --- a/drivers/staging/usbip/vhci_hcd.c
42522 +++ b/drivers/staging/usbip/vhci_hcd.c
42523 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42524 return;
42525 }
42526
42527 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42528 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42529 if (priv->seqnum == 0xffff)
42530 usbip_uinfo("seqnum max\n");
42531
42532 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42533 return -ENOMEM;
42534 }
42535
42536 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42537 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42538 if (unlink->seqnum == 0xffff)
42539 usbip_uinfo("seqnum max\n");
42540
42541 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42542 vdev->rhport = rhport;
42543 }
42544
42545 - atomic_set(&vhci->seqnum, 0);
42546 + atomic_set_unchecked(&vhci->seqnum, 0);
42547 spin_lock_init(&vhci->lock);
42548
42549
42550 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42551 index 7fd76fe..673695a 100644
42552 --- a/drivers/staging/usbip/vhci_rx.c
42553 +++ b/drivers/staging/usbip/vhci_rx.c
42554 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42555 usbip_uerr("cannot find a urb of seqnum %u\n",
42556 pdu->base.seqnum);
42557 usbip_uinfo("max seqnum %d\n",
42558 - atomic_read(&the_controller->seqnum));
42559 + atomic_read_unchecked(&the_controller->seqnum));
42560 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42561 return;
42562 }
42563 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42564 index 7891288..8e31300 100644
42565 --- a/drivers/staging/vme/devices/vme_user.c
42566 +++ b/drivers/staging/vme/devices/vme_user.c
42567 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42568 static int __init vme_user_probe(struct device *, int, int);
42569 static int __exit vme_user_remove(struct device *, int, int);
42570
42571 -static struct file_operations vme_user_fops = {
42572 +static const struct file_operations vme_user_fops = {
42573 .open = vme_user_open,
42574 .release = vme_user_release,
42575 .read = vme_user_read,
42576 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42577 index 58abf44..00c1fc8 100644
42578 --- a/drivers/staging/vt6655/hostap.c
42579 +++ b/drivers/staging/vt6655/hostap.c
42580 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42581 PSDevice apdev_priv;
42582 struct net_device *dev = pDevice->dev;
42583 int ret;
42584 - const struct net_device_ops apdev_netdev_ops = {
42585 + net_device_ops_no_const apdev_netdev_ops = {
42586 .ndo_start_xmit = pDevice->tx_80211,
42587 };
42588
42589 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42590 index 0c8267a..db1f363 100644
42591 --- a/drivers/staging/vt6656/hostap.c
42592 +++ b/drivers/staging/vt6656/hostap.c
42593 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42594 PSDevice apdev_priv;
42595 struct net_device *dev = pDevice->dev;
42596 int ret;
42597 - const struct net_device_ops apdev_netdev_ops = {
42598 + net_device_ops_no_const apdev_netdev_ops = {
42599 .ndo_start_xmit = pDevice->tx_80211,
42600 };
42601
42602 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42603 index 925678b..da7f5ed 100644
42604 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42605 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42606 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42607
42608 struct usbctlx_completor {
42609 int (*complete) (struct usbctlx_completor *);
42610 -};
42611 +} __no_const;
42612 typedef struct usbctlx_completor usbctlx_completor_t;
42613
42614 static int
42615 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42616 index 40de151..924f268 100644
42617 --- a/drivers/telephony/ixj.c
42618 +++ b/drivers/telephony/ixj.c
42619 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42620 bool mContinue;
42621 char *pIn, *pOut;
42622
42623 + pax_track_stack();
42624 +
42625 if (!SCI_Prepare(j))
42626 return 0;
42627
42628 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42629 index e941367..b631f5a 100644
42630 --- a/drivers/uio/uio.c
42631 +++ b/drivers/uio/uio.c
42632 @@ -23,6 +23,7 @@
42633 #include <linux/string.h>
42634 #include <linux/kobject.h>
42635 #include <linux/uio_driver.h>
42636 +#include <asm/local.h>
42637
42638 #define UIO_MAX_DEVICES 255
42639
42640 @@ -30,10 +31,10 @@ struct uio_device {
42641 struct module *owner;
42642 struct device *dev;
42643 int minor;
42644 - atomic_t event;
42645 + atomic_unchecked_t event;
42646 struct fasync_struct *async_queue;
42647 wait_queue_head_t wait;
42648 - int vma_count;
42649 + local_t vma_count;
42650 struct uio_info *info;
42651 struct kobject *map_dir;
42652 struct kobject *portio_dir;
42653 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42654 return entry->show(mem, buf);
42655 }
42656
42657 -static struct sysfs_ops map_sysfs_ops = {
42658 +static const struct sysfs_ops map_sysfs_ops = {
42659 .show = map_type_show,
42660 };
42661
42662 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42663 return entry->show(port, buf);
42664 }
42665
42666 -static struct sysfs_ops portio_sysfs_ops = {
42667 +static const struct sysfs_ops portio_sysfs_ops = {
42668 .show = portio_type_show,
42669 };
42670
42671 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42672 struct uio_device *idev = dev_get_drvdata(dev);
42673 if (idev)
42674 return sprintf(buf, "%u\n",
42675 - (unsigned int)atomic_read(&idev->event));
42676 + (unsigned int)atomic_read_unchecked(&idev->event));
42677 else
42678 return -ENODEV;
42679 }
42680 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42681 {
42682 struct uio_device *idev = info->uio_dev;
42683
42684 - atomic_inc(&idev->event);
42685 + atomic_inc_unchecked(&idev->event);
42686 wake_up_interruptible(&idev->wait);
42687 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42688 }
42689 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42690 }
42691
42692 listener->dev = idev;
42693 - listener->event_count = atomic_read(&idev->event);
42694 + listener->event_count = atomic_read_unchecked(&idev->event);
42695 filep->private_data = listener;
42696
42697 if (idev->info->open) {
42698 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42699 return -EIO;
42700
42701 poll_wait(filep, &idev->wait, wait);
42702 - if (listener->event_count != atomic_read(&idev->event))
42703 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42704 return POLLIN | POLLRDNORM;
42705 return 0;
42706 }
42707 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42708 do {
42709 set_current_state(TASK_INTERRUPTIBLE);
42710
42711 - event_count = atomic_read(&idev->event);
42712 + event_count = atomic_read_unchecked(&idev->event);
42713 if (event_count != listener->event_count) {
42714 if (copy_to_user(buf, &event_count, count))
42715 retval = -EFAULT;
42716 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42717 static void uio_vma_open(struct vm_area_struct *vma)
42718 {
42719 struct uio_device *idev = vma->vm_private_data;
42720 - idev->vma_count++;
42721 + local_inc(&idev->vma_count);
42722 }
42723
42724 static void uio_vma_close(struct vm_area_struct *vma)
42725 {
42726 struct uio_device *idev = vma->vm_private_data;
42727 - idev->vma_count--;
42728 + local_dec(&idev->vma_count);
42729 }
42730
42731 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42732 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42733 idev->owner = owner;
42734 idev->info = info;
42735 init_waitqueue_head(&idev->wait);
42736 - atomic_set(&idev->event, 0);
42737 + atomic_set_unchecked(&idev->event, 0);
42738
42739 ret = uio_get_minor(idev);
42740 if (ret)
42741 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42742 index fbea856..06efea6 100644
42743 --- a/drivers/usb/atm/usbatm.c
42744 +++ b/drivers/usb/atm/usbatm.c
42745 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42746 if (printk_ratelimit())
42747 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42748 __func__, vpi, vci);
42749 - atomic_inc(&vcc->stats->rx_err);
42750 + atomic_inc_unchecked(&vcc->stats->rx_err);
42751 return;
42752 }
42753
42754 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42755 if (length > ATM_MAX_AAL5_PDU) {
42756 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42757 __func__, length, vcc);
42758 - atomic_inc(&vcc->stats->rx_err);
42759 + atomic_inc_unchecked(&vcc->stats->rx_err);
42760 goto out;
42761 }
42762
42763 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42764 if (sarb->len < pdu_length) {
42765 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42766 __func__, pdu_length, sarb->len, vcc);
42767 - atomic_inc(&vcc->stats->rx_err);
42768 + atomic_inc_unchecked(&vcc->stats->rx_err);
42769 goto out;
42770 }
42771
42772 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42773 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42774 __func__, vcc);
42775 - atomic_inc(&vcc->stats->rx_err);
42776 + atomic_inc_unchecked(&vcc->stats->rx_err);
42777 goto out;
42778 }
42779
42780 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42781 if (printk_ratelimit())
42782 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42783 __func__, length);
42784 - atomic_inc(&vcc->stats->rx_drop);
42785 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42786 goto out;
42787 }
42788
42789 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42790
42791 vcc->push(vcc, skb);
42792
42793 - atomic_inc(&vcc->stats->rx);
42794 + atomic_inc_unchecked(&vcc->stats->rx);
42795 out:
42796 skb_trim(sarb, 0);
42797 }
42798 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42799 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42800
42801 usbatm_pop(vcc, skb);
42802 - atomic_inc(&vcc->stats->tx);
42803 + atomic_inc_unchecked(&vcc->stats->tx);
42804
42805 skb = skb_dequeue(&instance->sndqueue);
42806 }
42807 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42808 if (!left--)
42809 return sprintf(page,
42810 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42811 - atomic_read(&atm_dev->stats.aal5.tx),
42812 - atomic_read(&atm_dev->stats.aal5.tx_err),
42813 - atomic_read(&atm_dev->stats.aal5.rx),
42814 - atomic_read(&atm_dev->stats.aal5.rx_err),
42815 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42816 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42817 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42818 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42819 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42820 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42821
42822 if (!left--) {
42823 if (instance->disconnected)
42824 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42825 index 24e6205..fe5a5d4 100644
42826 --- a/drivers/usb/core/hcd.c
42827 +++ b/drivers/usb/core/hcd.c
42828 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42829
42830 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42831
42832 -struct usb_mon_operations *mon_ops;
42833 +const struct usb_mon_operations *mon_ops;
42834
42835 /*
42836 * The registration is unlocked.
42837 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42838 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42839 */
42840
42841 -int usb_mon_register (struct usb_mon_operations *ops)
42842 +int usb_mon_register (const struct usb_mon_operations *ops)
42843 {
42844
42845 if (mon_ops)
42846 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42847 index bcbe104..9cfd1c6 100644
42848 --- a/drivers/usb/core/hcd.h
42849 +++ b/drivers/usb/core/hcd.h
42850 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42851 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42852
42853 struct usb_mon_operations {
42854 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42855 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42856 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42857 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42858 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42859 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42860 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42861 };
42862
42863 -extern struct usb_mon_operations *mon_ops;
42864 +extern const struct usb_mon_operations *mon_ops;
42865
42866 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42867 {
42868 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42869 (*mon_ops->urb_complete)(bus, urb, status);
42870 }
42871
42872 -int usb_mon_register(struct usb_mon_operations *ops);
42873 +int usb_mon_register(const struct usb_mon_operations *ops);
42874 void usb_mon_deregister(void);
42875
42876 #else
42877 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42878 index 409cc94..a673bad 100644
42879 --- a/drivers/usb/core/message.c
42880 +++ b/drivers/usb/core/message.c
42881 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42882 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42883 if (buf) {
42884 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42885 - if (len > 0) {
42886 - smallbuf = kmalloc(++len, GFP_NOIO);
42887 + if (len++ > 0) {
42888 + smallbuf = kmalloc(len, GFP_NOIO);
42889 if (!smallbuf)
42890 return buf;
42891 memcpy(smallbuf, buf, len);
42892 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42893 index 62ff5e7..530b74e 100644
42894 --- a/drivers/usb/misc/appledisplay.c
42895 +++ b/drivers/usb/misc/appledisplay.c
42896 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42897 return pdata->msgdata[1];
42898 }
42899
42900 -static struct backlight_ops appledisplay_bl_data = {
42901 +static const struct backlight_ops appledisplay_bl_data = {
42902 .get_brightness = appledisplay_bl_get_brightness,
42903 .update_status = appledisplay_bl_update_status,
42904 };
42905 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42906 index e0c2db3..bd8cb66 100644
42907 --- a/drivers/usb/mon/mon_main.c
42908 +++ b/drivers/usb/mon/mon_main.c
42909 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42910 /*
42911 * Ops
42912 */
42913 -static struct usb_mon_operations mon_ops_0 = {
42914 +static const struct usb_mon_operations mon_ops_0 = {
42915 .urb_submit = mon_submit,
42916 .urb_submit_error = mon_submit_error,
42917 .urb_complete = mon_complete,
42918 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42919 index d6bea3e..60b250e 100644
42920 --- a/drivers/usb/wusbcore/wa-hc.h
42921 +++ b/drivers/usb/wusbcore/wa-hc.h
42922 @@ -192,7 +192,7 @@ struct wahc {
42923 struct list_head xfer_delayed_list;
42924 spinlock_t xfer_list_lock;
42925 struct work_struct xfer_work;
42926 - atomic_t xfer_id_count;
42927 + atomic_unchecked_t xfer_id_count;
42928 };
42929
42930
42931 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42932 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42933 spin_lock_init(&wa->xfer_list_lock);
42934 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42935 - atomic_set(&wa->xfer_id_count, 1);
42936 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42937 }
42938
42939 /**
42940 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42941 index 613a5fc..3174865 100644
42942 --- a/drivers/usb/wusbcore/wa-xfer.c
42943 +++ b/drivers/usb/wusbcore/wa-xfer.c
42944 @@ -293,7 +293,7 @@ out:
42945 */
42946 static void wa_xfer_id_init(struct wa_xfer *xfer)
42947 {
42948 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42949 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42950 }
42951
42952 /*
42953 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42954 index aa42fce..f8a828c 100644
42955 --- a/drivers/uwb/wlp/messages.c
42956 +++ b/drivers/uwb/wlp/messages.c
42957 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42958 size_t len = skb->len;
42959 size_t used;
42960 ssize_t result;
42961 - struct wlp_nonce enonce, rnonce;
42962 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42963 enum wlp_assc_error assc_err;
42964 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42965 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42966 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42967 index 0370399..6627c94 100644
42968 --- a/drivers/uwb/wlp/sysfs.c
42969 +++ b/drivers/uwb/wlp/sysfs.c
42970 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42971 return ret;
42972 }
42973
42974 -static
42975 -struct sysfs_ops wss_sysfs_ops = {
42976 +static const struct sysfs_ops wss_sysfs_ops = {
42977 .show = wlp_wss_attr_show,
42978 .store = wlp_wss_attr_store,
42979 };
42980 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42981 index 8c5e432..5ee90ea 100644
42982 --- a/drivers/video/atmel_lcdfb.c
42983 +++ b/drivers/video/atmel_lcdfb.c
42984 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42985 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42986 }
42987
42988 -static struct backlight_ops atmel_lcdc_bl_ops = {
42989 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42990 .update_status = atmel_bl_update_status,
42991 .get_brightness = atmel_bl_get_brightness,
42992 };
42993 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42994 index e4e4d43..66bcbcc 100644
42995 --- a/drivers/video/aty/aty128fb.c
42996 +++ b/drivers/video/aty/aty128fb.c
42997 @@ -149,7 +149,7 @@ enum {
42998 };
42999
43000 /* Must match above enum */
43001 -static const char *r128_family[] __devinitdata = {
43002 +static const char *r128_family[] __devinitconst = {
43003 "AGP",
43004 "PCI",
43005 "PRO AGP",
43006 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
43007 return bd->props.brightness;
43008 }
43009
43010 -static struct backlight_ops aty128_bl_data = {
43011 +static const struct backlight_ops aty128_bl_data = {
43012 .get_brightness = aty128_bl_get_brightness,
43013 .update_status = aty128_bl_update_status,
43014 };
43015 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
43016 index 913b4a4..9295a38 100644
43017 --- a/drivers/video/aty/atyfb_base.c
43018 +++ b/drivers/video/aty/atyfb_base.c
43019 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
43020 return bd->props.brightness;
43021 }
43022
43023 -static struct backlight_ops aty_bl_data = {
43024 +static const struct backlight_ops aty_bl_data = {
43025 .get_brightness = aty_bl_get_brightness,
43026 .update_status = aty_bl_update_status,
43027 };
43028 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
43029 index 1a056ad..221bd6a 100644
43030 --- a/drivers/video/aty/radeon_backlight.c
43031 +++ b/drivers/video/aty/radeon_backlight.c
43032 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
43033 return bd->props.brightness;
43034 }
43035
43036 -static struct backlight_ops radeon_bl_data = {
43037 +static const struct backlight_ops radeon_bl_data = {
43038 .get_brightness = radeon_bl_get_brightness,
43039 .update_status = radeon_bl_update_status,
43040 };
43041 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
43042 index ad05da5..3cb2cb9 100644
43043 --- a/drivers/video/backlight/adp5520_bl.c
43044 +++ b/drivers/video/backlight/adp5520_bl.c
43045 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
43046 return error ? data->current_brightness : reg_val;
43047 }
43048
43049 -static struct backlight_ops adp5520_bl_ops = {
43050 +static const struct backlight_ops adp5520_bl_ops = {
43051 .update_status = adp5520_bl_update_status,
43052 .get_brightness = adp5520_bl_get_brightness,
43053 };
43054 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
43055 index 2c3bdfc..d769b0b 100644
43056 --- a/drivers/video/backlight/adx_bl.c
43057 +++ b/drivers/video/backlight/adx_bl.c
43058 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
43059 return 1;
43060 }
43061
43062 -static struct backlight_ops adx_backlight_ops = {
43063 +static const struct backlight_ops adx_backlight_ops = {
43064 .options = 0,
43065 .update_status = adx_backlight_update_status,
43066 .get_brightness = adx_backlight_get_brightness,
43067 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
43068 index 505c082..6b6b3cc 100644
43069 --- a/drivers/video/backlight/atmel-pwm-bl.c
43070 +++ b/drivers/video/backlight/atmel-pwm-bl.c
43071 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43072 return pwm_channel_enable(&pwmbl->pwmc);
43073 }
43074
43075 -static struct backlight_ops atmel_pwm_bl_ops = {
43076 +static const struct backlight_ops atmel_pwm_bl_ops = {
43077 .get_brightness = atmel_pwm_bl_get_intensity,
43078 .update_status = atmel_pwm_bl_set_intensity,
43079 };
43080 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43081 index 5e20e6e..89025e6 100644
43082 --- a/drivers/video/backlight/backlight.c
43083 +++ b/drivers/video/backlight/backlight.c
43084 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43085 * ERR_PTR() or a pointer to the newly allocated device.
43086 */
43087 struct backlight_device *backlight_device_register(const char *name,
43088 - struct device *parent, void *devdata, struct backlight_ops *ops)
43089 + struct device *parent, void *devdata, const struct backlight_ops *ops)
43090 {
43091 struct backlight_device *new_bd;
43092 int rc;
43093 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43094 index 9677494..b4bcf80 100644
43095 --- a/drivers/video/backlight/corgi_lcd.c
43096 +++ b/drivers/video/backlight/corgi_lcd.c
43097 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43098 }
43099 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43100
43101 -static struct backlight_ops corgi_bl_ops = {
43102 +static const struct backlight_ops corgi_bl_ops = {
43103 .get_brightness = corgi_bl_get_intensity,
43104 .update_status = corgi_bl_update_status,
43105 };
43106 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43107 index b9fe62b..2914bf1 100644
43108 --- a/drivers/video/backlight/cr_bllcd.c
43109 +++ b/drivers/video/backlight/cr_bllcd.c
43110 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43111 return intensity;
43112 }
43113
43114 -static struct backlight_ops cr_backlight_ops = {
43115 +static const struct backlight_ops cr_backlight_ops = {
43116 .get_brightness = cr_backlight_get_intensity,
43117 .update_status = cr_backlight_set_intensity,
43118 };
43119 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43120 index 701a108..feacfd5 100644
43121 --- a/drivers/video/backlight/da903x_bl.c
43122 +++ b/drivers/video/backlight/da903x_bl.c
43123 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43124 return data->current_brightness;
43125 }
43126
43127 -static struct backlight_ops da903x_backlight_ops = {
43128 +static const struct backlight_ops da903x_backlight_ops = {
43129 .update_status = da903x_backlight_update_status,
43130 .get_brightness = da903x_backlight_get_brightness,
43131 };
43132 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43133 index 6d27f62..e6d348e 100644
43134 --- a/drivers/video/backlight/generic_bl.c
43135 +++ b/drivers/video/backlight/generic_bl.c
43136 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43137 }
43138 EXPORT_SYMBOL(corgibl_limit_intensity);
43139
43140 -static struct backlight_ops genericbl_ops = {
43141 +static const struct backlight_ops genericbl_ops = {
43142 .options = BL_CORE_SUSPENDRESUME,
43143 .get_brightness = genericbl_get_intensity,
43144 .update_status = genericbl_send_intensity,
43145 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43146 index 7fb4eef..f7cc528 100644
43147 --- a/drivers/video/backlight/hp680_bl.c
43148 +++ b/drivers/video/backlight/hp680_bl.c
43149 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43150 return current_intensity;
43151 }
43152
43153 -static struct backlight_ops hp680bl_ops = {
43154 +static const struct backlight_ops hp680bl_ops = {
43155 .get_brightness = hp680bl_get_intensity,
43156 .update_status = hp680bl_set_intensity,
43157 };
43158 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43159 index 7aed256..db9071f 100644
43160 --- a/drivers/video/backlight/jornada720_bl.c
43161 +++ b/drivers/video/backlight/jornada720_bl.c
43162 @@ -93,7 +93,7 @@ out:
43163 return ret;
43164 }
43165
43166 -static struct backlight_ops jornada_bl_ops = {
43167 +static const struct backlight_ops jornada_bl_ops = {
43168 .get_brightness = jornada_bl_get_brightness,
43169 .update_status = jornada_bl_update_status,
43170 .options = BL_CORE_SUSPENDRESUME,
43171 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43172 index a38fda1..939e7b8 100644
43173 --- a/drivers/video/backlight/kb3886_bl.c
43174 +++ b/drivers/video/backlight/kb3886_bl.c
43175 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43176 return kb3886bl_intensity;
43177 }
43178
43179 -static struct backlight_ops kb3886bl_ops = {
43180 +static const struct backlight_ops kb3886bl_ops = {
43181 .get_brightness = kb3886bl_get_intensity,
43182 .update_status = kb3886bl_send_intensity,
43183 };
43184 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43185 index 6b488b8..00a9591 100644
43186 --- a/drivers/video/backlight/locomolcd.c
43187 +++ b/drivers/video/backlight/locomolcd.c
43188 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43189 return current_intensity;
43190 }
43191
43192 -static struct backlight_ops locomobl_data = {
43193 +static const struct backlight_ops locomobl_data = {
43194 .get_brightness = locomolcd_get_intensity,
43195 .update_status = locomolcd_set_intensity,
43196 };
43197 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43198 index 99bdfa8..3dac448 100644
43199 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43200 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43201 @@ -33,7 +33,7 @@ struct dmi_match_data {
43202 unsigned long iostart;
43203 unsigned long iolen;
43204 /* Backlight operations structure. */
43205 - struct backlight_ops backlight_ops;
43206 + const struct backlight_ops backlight_ops;
43207 };
43208
43209 /* Module parameters. */
43210 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43211 index cbad67e..3cf900e 100644
43212 --- a/drivers/video/backlight/omap1_bl.c
43213 +++ b/drivers/video/backlight/omap1_bl.c
43214 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43215 return bl->current_intensity;
43216 }
43217
43218 -static struct backlight_ops omapbl_ops = {
43219 +static const struct backlight_ops omapbl_ops = {
43220 .get_brightness = omapbl_get_intensity,
43221 .update_status = omapbl_update_status,
43222 };
43223 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43224 index 9edaf24..075786e 100644
43225 --- a/drivers/video/backlight/progear_bl.c
43226 +++ b/drivers/video/backlight/progear_bl.c
43227 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43228 return intensity - HW_LEVEL_MIN;
43229 }
43230
43231 -static struct backlight_ops progearbl_ops = {
43232 +static const struct backlight_ops progearbl_ops = {
43233 .get_brightness = progearbl_get_intensity,
43234 .update_status = progearbl_set_intensity,
43235 };
43236 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43237 index 8871662..df9e0b3 100644
43238 --- a/drivers/video/backlight/pwm_bl.c
43239 +++ b/drivers/video/backlight/pwm_bl.c
43240 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43241 return bl->props.brightness;
43242 }
43243
43244 -static struct backlight_ops pwm_backlight_ops = {
43245 +static const struct backlight_ops pwm_backlight_ops = {
43246 .update_status = pwm_backlight_update_status,
43247 .get_brightness = pwm_backlight_get_brightness,
43248 };
43249 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43250 index 43edbad..e14ce4d 100644
43251 --- a/drivers/video/backlight/tosa_bl.c
43252 +++ b/drivers/video/backlight/tosa_bl.c
43253 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43254 return props->brightness;
43255 }
43256
43257 -static struct backlight_ops bl_ops = {
43258 +static const struct backlight_ops bl_ops = {
43259 .get_brightness = tosa_bl_get_brightness,
43260 .update_status = tosa_bl_update_status,
43261 };
43262 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43263 index 467bdb7..e32add3 100644
43264 --- a/drivers/video/backlight/wm831x_bl.c
43265 +++ b/drivers/video/backlight/wm831x_bl.c
43266 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43267 return data->current_brightness;
43268 }
43269
43270 -static struct backlight_ops wm831x_backlight_ops = {
43271 +static const struct backlight_ops wm831x_backlight_ops = {
43272 .options = BL_CORE_SUSPENDRESUME,
43273 .update_status = wm831x_backlight_update_status,
43274 .get_brightness = wm831x_backlight_get_brightness,
43275 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43276 index e49ae5e..db4e6f7 100644
43277 --- a/drivers/video/bf54x-lq043fb.c
43278 +++ b/drivers/video/bf54x-lq043fb.c
43279 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43280 return 0;
43281 }
43282
43283 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43284 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43285 .get_brightness = bl_get_brightness,
43286 };
43287
43288 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43289 index 2c72a7c..d523e52 100644
43290 --- a/drivers/video/bfin-t350mcqb-fb.c
43291 +++ b/drivers/video/bfin-t350mcqb-fb.c
43292 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43293 return 0;
43294 }
43295
43296 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43297 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43298 .get_brightness = bl_get_brightness,
43299 };
43300
43301 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43302 index f53b9f1..958bf4e 100644
43303 --- a/drivers/video/fbcmap.c
43304 +++ b/drivers/video/fbcmap.c
43305 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43306 rc = -ENODEV;
43307 goto out;
43308 }
43309 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43310 - !info->fbops->fb_setcmap)) {
43311 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43312 rc = -EINVAL;
43313 goto out1;
43314 }
43315 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43316 index 99bbd28..ad3829e 100644
43317 --- a/drivers/video/fbmem.c
43318 +++ b/drivers/video/fbmem.c
43319 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43320 image->dx += image->width + 8;
43321 }
43322 } else if (rotate == FB_ROTATE_UD) {
43323 - for (x = 0; x < num && image->dx >= 0; x++) {
43324 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43325 info->fbops->fb_imageblit(info, image);
43326 image->dx -= image->width + 8;
43327 }
43328 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43329 image->dy += image->height + 8;
43330 }
43331 } else if (rotate == FB_ROTATE_CCW) {
43332 - for (x = 0; x < num && image->dy >= 0; x++) {
43333 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43334 info->fbops->fb_imageblit(info, image);
43335 image->dy -= image->height + 8;
43336 }
43337 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43338 int flags = info->flags;
43339 int ret = 0;
43340
43341 + pax_track_stack();
43342 +
43343 if (var->activate & FB_ACTIVATE_INV_MODE) {
43344 struct fb_videomode mode1, mode2;
43345
43346 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43347 void __user *argp = (void __user *)arg;
43348 long ret = 0;
43349
43350 + pax_track_stack();
43351 +
43352 switch (cmd) {
43353 case FBIOGET_VSCREENINFO:
43354 if (!lock_fb_info(info))
43355 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43356 return -EFAULT;
43357 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43358 return -EINVAL;
43359 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43360 + if (con2fb.framebuffer >= FB_MAX)
43361 return -EINVAL;
43362 if (!registered_fb[con2fb.framebuffer])
43363 request_module("fb%d", con2fb.framebuffer);
43364 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43365 index f20eff8..3e4f622 100644
43366 --- a/drivers/video/geode/gx1fb_core.c
43367 +++ b/drivers/video/geode/gx1fb_core.c
43368 @@ -30,7 +30,7 @@ static int crt_option = 1;
43369 static char panel_option[32] = "";
43370
43371 /* Modes relevant to the GX1 (taken from modedb.c) */
43372 -static const struct fb_videomode __initdata gx1_modedb[] = {
43373 +static const struct fb_videomode __initconst gx1_modedb[] = {
43374 /* 640x480-60 VESA */
43375 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43376 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43377 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43378 index 896e53d..4d87d0b 100644
43379 --- a/drivers/video/gxt4500.c
43380 +++ b/drivers/video/gxt4500.c
43381 @@ -156,7 +156,7 @@ struct gxt4500_par {
43382 static char *mode_option;
43383
43384 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43385 -static const struct fb_videomode defaultmode __devinitdata = {
43386 +static const struct fb_videomode defaultmode __devinitconst = {
43387 .refresh = 60,
43388 .xres = 1280,
43389 .yres = 1024,
43390 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43391 return 0;
43392 }
43393
43394 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43395 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43396 .id = "IBM GXT4500P",
43397 .type = FB_TYPE_PACKED_PIXELS,
43398 .visual = FB_VISUAL_PSEUDOCOLOR,
43399 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43400 index f5bedee..28c6028 100644
43401 --- a/drivers/video/i810/i810_accel.c
43402 +++ b/drivers/video/i810/i810_accel.c
43403 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43404 }
43405 }
43406 printk("ringbuffer lockup!!!\n");
43407 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43408 i810_report_error(mmio);
43409 par->dev_flags |= LOCKUP;
43410 info->pixmap.scan_align = 1;
43411 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43412 index 5743ea2..457f82c 100644
43413 --- a/drivers/video/i810/i810_main.c
43414 +++ b/drivers/video/i810/i810_main.c
43415 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43416 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43417
43418 /* PCI */
43419 -static const char *i810_pci_list[] __devinitdata = {
43420 +static const char *i810_pci_list[] __devinitconst = {
43421 "Intel(R) 810 Framebuffer Device" ,
43422 "Intel(R) 810-DC100 Framebuffer Device" ,
43423 "Intel(R) 810E Framebuffer Device" ,
43424 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43425 index 3c14e43..eafa544 100644
43426 --- a/drivers/video/logo/logo_linux_clut224.ppm
43427 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43428 @@ -1,1604 +1,1123 @@
43429 P3
43430 -# Standard 224-color Linux logo
43431 80 80
43432 255
43433 - 0 0 0 0 0 0 0 0 0 0 0 0
43434 - 0 0 0 0 0 0 0 0 0 0 0 0
43435 - 0 0 0 0 0 0 0 0 0 0 0 0
43436 - 0 0 0 0 0 0 0 0 0 0 0 0
43437 - 0 0 0 0 0 0 0 0 0 0 0 0
43438 - 0 0 0 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 0 0 0
43440 - 0 0 0 0 0 0 0 0 0 0 0 0
43441 - 0 0 0 0 0 0 0 0 0 0 0 0
43442 - 6 6 6 6 6 6 10 10 10 10 10 10
43443 - 10 10 10 6 6 6 6 6 6 6 6 6
43444 - 0 0 0 0 0 0 0 0 0 0 0 0
43445 - 0 0 0 0 0 0 0 0 0 0 0 0
43446 - 0 0 0 0 0 0 0 0 0 0 0 0
43447 - 0 0 0 0 0 0 0 0 0 0 0 0
43448 - 0 0 0 0 0 0 0 0 0 0 0 0
43449 - 0 0 0 0 0 0 0 0 0 0 0 0
43450 - 0 0 0 0 0 0 0 0 0 0 0 0
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 0 0 0 0 0 0 0 0 0 0 0 0
43453 - 0 0 0 0 0 0 0 0 0 0 0 0
43454 - 0 0 0 0 0 0 0 0 0 0 0 0
43455 - 0 0 0 0 0 0 0 0 0 0 0 0
43456 - 0 0 0 0 0 0 0 0 0 0 0 0
43457 - 0 0 0 0 0 0 0 0 0 0 0 0
43458 - 0 0 0 0 0 0 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 0 0 0 0 0 0
43460 - 0 0 0 0 0 0 0 0 0 0 0 0
43461 - 0 0 0 6 6 6 10 10 10 14 14 14
43462 - 22 22 22 26 26 26 30 30 30 34 34 34
43463 - 30 30 30 30 30 30 26 26 26 18 18 18
43464 - 14 14 14 10 10 10 6 6 6 0 0 0
43465 - 0 0 0 0 0 0 0 0 0 0 0 0
43466 - 0 0 0 0 0 0 0 0 0 0 0 0
43467 - 0 0 0 0 0 0 0 0 0 0 0 0
43468 - 0 0 0 0 0 0 0 0 0 0 0 0
43469 - 0 0 0 0 0 0 0 0 0 0 0 0
43470 - 0 0 0 0 0 0 0 0 0 0 0 0
43471 - 0 0 0 0 0 0 0 0 0 0 0 0
43472 - 0 0 0 0 0 0 0 0 0 0 0 0
43473 - 0 0 0 0 0 0 0 0 0 0 0 0
43474 - 0 0 0 0 0 1 0 0 1 0 0 0
43475 - 0 0 0 0 0 0 0 0 0 0 0 0
43476 - 0 0 0 0 0 0 0 0 0 0 0 0
43477 - 0 0 0 0 0 0 0 0 0 0 0 0
43478 - 0 0 0 0 0 0 0 0 0 0 0 0
43479 - 0 0 0 0 0 0 0 0 0 0 0 0
43480 - 0 0 0 0 0 0 0 0 0 0 0 0
43481 - 6 6 6 14 14 14 26 26 26 42 42 42
43482 - 54 54 54 66 66 66 78 78 78 78 78 78
43483 - 78 78 78 74 74 74 66 66 66 54 54 54
43484 - 42 42 42 26 26 26 18 18 18 10 10 10
43485 - 6 6 6 0 0 0 0 0 0 0 0 0
43486 - 0 0 0 0 0 0 0 0 0 0 0 0
43487 - 0 0 0 0 0 0 0 0 0 0 0 0
43488 - 0 0 0 0 0 0 0 0 0 0 0 0
43489 - 0 0 0 0 0 0 0 0 0 0 0 0
43490 - 0 0 0 0 0 0 0 0 0 0 0 0
43491 - 0 0 0 0 0 0 0 0 0 0 0 0
43492 - 0 0 0 0 0 0 0 0 0 0 0 0
43493 - 0 0 0 0 0 0 0 0 0 0 0 0
43494 - 0 0 1 0 0 0 0 0 0 0 0 0
43495 - 0 0 0 0 0 0 0 0 0 0 0 0
43496 - 0 0 0 0 0 0 0 0 0 0 0 0
43497 - 0 0 0 0 0 0 0 0 0 0 0 0
43498 - 0 0 0 0 0 0 0 0 0 0 0 0
43499 - 0 0 0 0 0 0 0 0 0 0 0 0
43500 - 0 0 0 0 0 0 0 0 0 10 10 10
43501 - 22 22 22 42 42 42 66 66 66 86 86 86
43502 - 66 66 66 38 38 38 38 38 38 22 22 22
43503 - 26 26 26 34 34 34 54 54 54 66 66 66
43504 - 86 86 86 70 70 70 46 46 46 26 26 26
43505 - 14 14 14 6 6 6 0 0 0 0 0 0
43506 - 0 0 0 0 0 0 0 0 0 0 0 0
43507 - 0 0 0 0 0 0 0 0 0 0 0 0
43508 - 0 0 0 0 0 0 0 0 0 0 0 0
43509 - 0 0 0 0 0 0 0 0 0 0 0 0
43510 - 0 0 0 0 0 0 0 0 0 0 0 0
43511 - 0 0 0 0 0 0 0 0 0 0 0 0
43512 - 0 0 0 0 0 0 0 0 0 0 0 0
43513 - 0 0 0 0 0 0 0 0 0 0 0 0
43514 - 0 0 1 0 0 1 0 0 1 0 0 0
43515 - 0 0 0 0 0 0 0 0 0 0 0 0
43516 - 0 0 0 0 0 0 0 0 0 0 0 0
43517 - 0 0 0 0 0 0 0 0 0 0 0 0
43518 - 0 0 0 0 0 0 0 0 0 0 0 0
43519 - 0 0 0 0 0 0 0 0 0 0 0 0
43520 - 0 0 0 0 0 0 10 10 10 26 26 26
43521 - 50 50 50 82 82 82 58 58 58 6 6 6
43522 - 2 2 6 2 2 6 2 2 6 2 2 6
43523 - 2 2 6 2 2 6 2 2 6 2 2 6
43524 - 6 6 6 54 54 54 86 86 86 66 66 66
43525 - 38 38 38 18 18 18 6 6 6 0 0 0
43526 - 0 0 0 0 0 0 0 0 0 0 0 0
43527 - 0 0 0 0 0 0 0 0 0 0 0 0
43528 - 0 0 0 0 0 0 0 0 0 0 0 0
43529 - 0 0 0 0 0 0 0 0 0 0 0 0
43530 - 0 0 0 0 0 0 0 0 0 0 0 0
43531 - 0 0 0 0 0 0 0 0 0 0 0 0
43532 - 0 0 0 0 0 0 0 0 0 0 0 0
43533 - 0 0 0 0 0 0 0 0 0 0 0 0
43534 - 0 0 0 0 0 0 0 0 0 0 0 0
43535 - 0 0 0 0 0 0 0 0 0 0 0 0
43536 - 0 0 0 0 0 0 0 0 0 0 0 0
43537 - 0 0 0 0 0 0 0 0 0 0 0 0
43538 - 0 0 0 0 0 0 0 0 0 0 0 0
43539 - 0 0 0 0 0 0 0 0 0 0 0 0
43540 - 0 0 0 6 6 6 22 22 22 50 50 50
43541 - 78 78 78 34 34 34 2 2 6 2 2 6
43542 - 2 2 6 2 2 6 2 2 6 2 2 6
43543 - 2 2 6 2 2 6 2 2 6 2 2 6
43544 - 2 2 6 2 2 6 6 6 6 70 70 70
43545 - 78 78 78 46 46 46 22 22 22 6 6 6
43546 - 0 0 0 0 0 0 0 0 0 0 0 0
43547 - 0 0 0 0 0 0 0 0 0 0 0 0
43548 - 0 0 0 0 0 0 0 0 0 0 0 0
43549 - 0 0 0 0 0 0 0 0 0 0 0 0
43550 - 0 0 0 0 0 0 0 0 0 0 0 0
43551 - 0 0 0 0 0 0 0 0 0 0 0 0
43552 - 0 0 0 0 0 0 0 0 0 0 0 0
43553 - 0 0 0 0 0 0 0 0 0 0 0 0
43554 - 0 0 1 0 0 1 0 0 1 0 0 0
43555 - 0 0 0 0 0 0 0 0 0 0 0 0
43556 - 0 0 0 0 0 0 0 0 0 0 0 0
43557 - 0 0 0 0 0 0 0 0 0 0 0 0
43558 - 0 0 0 0 0 0 0 0 0 0 0 0
43559 - 0 0 0 0 0 0 0 0 0 0 0 0
43560 - 6 6 6 18 18 18 42 42 42 82 82 82
43561 - 26 26 26 2 2 6 2 2 6 2 2 6
43562 - 2 2 6 2 2 6 2 2 6 2 2 6
43563 - 2 2 6 2 2 6 2 2 6 14 14 14
43564 - 46 46 46 34 34 34 6 6 6 2 2 6
43565 - 42 42 42 78 78 78 42 42 42 18 18 18
43566 - 6 6 6 0 0 0 0 0 0 0 0 0
43567 - 0 0 0 0 0 0 0 0 0 0 0 0
43568 - 0 0 0 0 0 0 0 0 0 0 0 0
43569 - 0 0 0 0 0 0 0 0 0 0 0 0
43570 - 0 0 0 0 0 0 0 0 0 0 0 0
43571 - 0 0 0 0 0 0 0 0 0 0 0 0
43572 - 0 0 0 0 0 0 0 0 0 0 0 0
43573 - 0 0 0 0 0 0 0 0 0 0 0 0
43574 - 0 0 1 0 0 0 0 0 1 0 0 0
43575 - 0 0 0 0 0 0 0 0 0 0 0 0
43576 - 0 0 0 0 0 0 0 0 0 0 0 0
43577 - 0 0 0 0 0 0 0 0 0 0 0 0
43578 - 0 0 0 0 0 0 0 0 0 0 0 0
43579 - 0 0 0 0 0 0 0 0 0 0 0 0
43580 - 10 10 10 30 30 30 66 66 66 58 58 58
43581 - 2 2 6 2 2 6 2 2 6 2 2 6
43582 - 2 2 6 2 2 6 2 2 6 2 2 6
43583 - 2 2 6 2 2 6 2 2 6 26 26 26
43584 - 86 86 86 101 101 101 46 46 46 10 10 10
43585 - 2 2 6 58 58 58 70 70 70 34 34 34
43586 - 10 10 10 0 0 0 0 0 0 0 0 0
43587 - 0 0 0 0 0 0 0 0 0 0 0 0
43588 - 0 0 0 0 0 0 0 0 0 0 0 0
43589 - 0 0 0 0 0 0 0 0 0 0 0 0
43590 - 0 0 0 0 0 0 0 0 0 0 0 0
43591 - 0 0 0 0 0 0 0 0 0 0 0 0
43592 - 0 0 0 0 0 0 0 0 0 0 0 0
43593 - 0 0 0 0 0 0 0 0 0 0 0 0
43594 - 0 0 1 0 0 1 0 0 1 0 0 0
43595 - 0 0 0 0 0 0 0 0 0 0 0 0
43596 - 0 0 0 0 0 0 0 0 0 0 0 0
43597 - 0 0 0 0 0 0 0 0 0 0 0 0
43598 - 0 0 0 0 0 0 0 0 0 0 0 0
43599 - 0 0 0 0 0 0 0 0 0 0 0 0
43600 - 14 14 14 42 42 42 86 86 86 10 10 10
43601 - 2 2 6 2 2 6 2 2 6 2 2 6
43602 - 2 2 6 2 2 6 2 2 6 2 2 6
43603 - 2 2 6 2 2 6 2 2 6 30 30 30
43604 - 94 94 94 94 94 94 58 58 58 26 26 26
43605 - 2 2 6 6 6 6 78 78 78 54 54 54
43606 - 22 22 22 6 6 6 0 0 0 0 0 0
43607 - 0 0 0 0 0 0 0 0 0 0 0 0
43608 - 0 0 0 0 0 0 0 0 0 0 0 0
43609 - 0 0 0 0 0 0 0 0 0 0 0 0
43610 - 0 0 0 0 0 0 0 0 0 0 0 0
43611 - 0 0 0 0 0 0 0 0 0 0 0 0
43612 - 0 0 0 0 0 0 0 0 0 0 0 0
43613 - 0 0 0 0 0 0 0 0 0 0 0 0
43614 - 0 0 0 0 0 0 0 0 0 0 0 0
43615 - 0 0 0 0 0 0 0 0 0 0 0 0
43616 - 0 0 0 0 0 0 0 0 0 0 0 0
43617 - 0 0 0 0 0 0 0 0 0 0 0 0
43618 - 0 0 0 0 0 0 0 0 0 0 0 0
43619 - 0 0 0 0 0 0 0 0 0 6 6 6
43620 - 22 22 22 62 62 62 62 62 62 2 2 6
43621 - 2 2 6 2 2 6 2 2 6 2 2 6
43622 - 2 2 6 2 2 6 2 2 6 2 2 6
43623 - 2 2 6 2 2 6 2 2 6 26 26 26
43624 - 54 54 54 38 38 38 18 18 18 10 10 10
43625 - 2 2 6 2 2 6 34 34 34 82 82 82
43626 - 38 38 38 14 14 14 0 0 0 0 0 0
43627 - 0 0 0 0 0 0 0 0 0 0 0 0
43628 - 0 0 0 0 0 0 0 0 0 0 0 0
43629 - 0 0 0 0 0 0 0 0 0 0 0 0
43630 - 0 0 0 0 0 0 0 0 0 0 0 0
43631 - 0 0 0 0 0 0 0 0 0 0 0 0
43632 - 0 0 0 0 0 0 0 0 0 0 0 0
43633 - 0 0 0 0 0 0 0 0 0 0 0 0
43634 - 0 0 0 0 0 1 0 0 1 0 0 0
43635 - 0 0 0 0 0 0 0 0 0 0 0 0
43636 - 0 0 0 0 0 0 0 0 0 0 0 0
43637 - 0 0 0 0 0 0 0 0 0 0 0 0
43638 - 0 0 0 0 0 0 0 0 0 0 0 0
43639 - 0 0 0 0 0 0 0 0 0 6 6 6
43640 - 30 30 30 78 78 78 30 30 30 2 2 6
43641 - 2 2 6 2 2 6 2 2 6 2 2 6
43642 - 2 2 6 2 2 6 2 2 6 2 2 6
43643 - 2 2 6 2 2 6 2 2 6 10 10 10
43644 - 10 10 10 2 2 6 2 2 6 2 2 6
43645 - 2 2 6 2 2 6 2 2 6 78 78 78
43646 - 50 50 50 18 18 18 6 6 6 0 0 0
43647 - 0 0 0 0 0 0 0 0 0 0 0 0
43648 - 0 0 0 0 0 0 0 0 0 0 0 0
43649 - 0 0 0 0 0 0 0 0 0 0 0 0
43650 - 0 0 0 0 0 0 0 0 0 0 0 0
43651 - 0 0 0 0 0 0 0 0 0 0 0 0
43652 - 0 0 0 0 0 0 0 0 0 0 0 0
43653 - 0 0 0 0 0 0 0 0 0 0 0 0
43654 - 0 0 1 0 0 0 0 0 0 0 0 0
43655 - 0 0 0 0 0 0 0 0 0 0 0 0
43656 - 0 0 0 0 0 0 0 0 0 0 0 0
43657 - 0 0 0 0 0 0 0 0 0 0 0 0
43658 - 0 0 0 0 0 0 0 0 0 0 0 0
43659 - 0 0 0 0 0 0 0 0 0 10 10 10
43660 - 38 38 38 86 86 86 14 14 14 2 2 6
43661 - 2 2 6 2 2 6 2 2 6 2 2 6
43662 - 2 2 6 2 2 6 2 2 6 2 2 6
43663 - 2 2 6 2 2 6 2 2 6 2 2 6
43664 - 2 2 6 2 2 6 2 2 6 2 2 6
43665 - 2 2 6 2 2 6 2 2 6 54 54 54
43666 - 66 66 66 26 26 26 6 6 6 0 0 0
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 0 0 0 0 0 0 0 0 0 0 0 0
43669 - 0 0 0 0 0 0 0 0 0 0 0 0
43670 - 0 0 0 0 0 0 0 0 0 0 0 0
43671 - 0 0 0 0 0 0 0 0 0 0 0 0
43672 - 0 0 0 0 0 0 0 0 0 0 0 0
43673 - 0 0 0 0 0 0 0 0 0 0 0 0
43674 - 0 0 0 0 0 1 0 0 1 0 0 0
43675 - 0 0 0 0 0 0 0 0 0 0 0 0
43676 - 0 0 0 0 0 0 0 0 0 0 0 0
43677 - 0 0 0 0 0 0 0 0 0 0 0 0
43678 - 0 0 0 0 0 0 0 0 0 0 0 0
43679 - 0 0 0 0 0 0 0 0 0 14 14 14
43680 - 42 42 42 82 82 82 2 2 6 2 2 6
43681 - 2 2 6 6 6 6 10 10 10 2 2 6
43682 - 2 2 6 2 2 6 2 2 6 2 2 6
43683 - 2 2 6 2 2 6 2 2 6 6 6 6
43684 - 14 14 14 10 10 10 2 2 6 2 2 6
43685 - 2 2 6 2 2 6 2 2 6 18 18 18
43686 - 82 82 82 34 34 34 10 10 10 0 0 0
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 0 0 0
43689 - 0 0 0 0 0 0 0 0 0 0 0 0
43690 - 0 0 0 0 0 0 0 0 0 0 0 0
43691 - 0 0 0 0 0 0 0 0 0 0 0 0
43692 - 0 0 0 0 0 0 0 0 0 0 0 0
43693 - 0 0 0 0 0 0 0 0 0 0 0 0
43694 - 0 0 1 0 0 0 0 0 0 0 0 0
43695 - 0 0 0 0 0 0 0 0 0 0 0 0
43696 - 0 0 0 0 0 0 0 0 0 0 0 0
43697 - 0 0 0 0 0 0 0 0 0 0 0 0
43698 - 0 0 0 0 0 0 0 0 0 0 0 0
43699 - 0 0 0 0 0 0 0 0 0 14 14 14
43700 - 46 46 46 86 86 86 2 2 6 2 2 6
43701 - 6 6 6 6 6 6 22 22 22 34 34 34
43702 - 6 6 6 2 2 6 2 2 6 2 2 6
43703 - 2 2 6 2 2 6 18 18 18 34 34 34
43704 - 10 10 10 50 50 50 22 22 22 2 2 6
43705 - 2 2 6 2 2 6 2 2 6 10 10 10
43706 - 86 86 86 42 42 42 14 14 14 0 0 0
43707 - 0 0 0 0 0 0 0 0 0 0 0 0
43708 - 0 0 0 0 0 0 0 0 0 0 0 0
43709 - 0 0 0 0 0 0 0 0 0 0 0 0
43710 - 0 0 0 0 0 0 0 0 0 0 0 0
43711 - 0 0 0 0 0 0 0 0 0 0 0 0
43712 - 0 0 0 0 0 0 0 0 0 0 0 0
43713 - 0 0 0 0 0 0 0 0 0 0 0 0
43714 - 0 0 1 0 0 1 0 0 1 0 0 0
43715 - 0 0 0 0 0 0 0 0 0 0 0 0
43716 - 0 0 0 0 0 0 0 0 0 0 0 0
43717 - 0 0 0 0 0 0 0 0 0 0 0 0
43718 - 0 0 0 0 0 0 0 0 0 0 0 0
43719 - 0 0 0 0 0 0 0 0 0 14 14 14
43720 - 46 46 46 86 86 86 2 2 6 2 2 6
43721 - 38 38 38 116 116 116 94 94 94 22 22 22
43722 - 22 22 22 2 2 6 2 2 6 2 2 6
43723 - 14 14 14 86 86 86 138 138 138 162 162 162
43724 -154 154 154 38 38 38 26 26 26 6 6 6
43725 - 2 2 6 2 2 6 2 2 6 2 2 6
43726 - 86 86 86 46 46 46 14 14 14 0 0 0
43727 - 0 0 0 0 0 0 0 0 0 0 0 0
43728 - 0 0 0 0 0 0 0 0 0 0 0 0
43729 - 0 0 0 0 0 0 0 0 0 0 0 0
43730 - 0 0 0 0 0 0 0 0 0 0 0 0
43731 - 0 0 0 0 0 0 0 0 0 0 0 0
43732 - 0 0 0 0 0 0 0 0 0 0 0 0
43733 - 0 0 0 0 0 0 0 0 0 0 0 0
43734 - 0 0 0 0 0 0 0 0 0 0 0 0
43735 - 0 0 0 0 0 0 0 0 0 0 0 0
43736 - 0 0 0 0 0 0 0 0 0 0 0 0
43737 - 0 0 0 0 0 0 0 0 0 0 0 0
43738 - 0 0 0 0 0 0 0 0 0 0 0 0
43739 - 0 0 0 0 0 0 0 0 0 14 14 14
43740 - 46 46 46 86 86 86 2 2 6 14 14 14
43741 -134 134 134 198 198 198 195 195 195 116 116 116
43742 - 10 10 10 2 2 6 2 2 6 6 6 6
43743 -101 98 89 187 187 187 210 210 210 218 218 218
43744 -214 214 214 134 134 134 14 14 14 6 6 6
43745 - 2 2 6 2 2 6 2 2 6 2 2 6
43746 - 86 86 86 50 50 50 18 18 18 6 6 6
43747 - 0 0 0 0 0 0 0 0 0 0 0 0
43748 - 0 0 0 0 0 0 0 0 0 0 0 0
43749 - 0 0 0 0 0 0 0 0 0 0 0 0
43750 - 0 0 0 0 0 0 0 0 0 0 0 0
43751 - 0 0 0 0 0 0 0 0 0 0 0 0
43752 - 0 0 0 0 0 0 0 0 0 0 0 0
43753 - 0 0 0 0 0 0 0 0 1 0 0 0
43754 - 0 0 1 0 0 1 0 0 1 0 0 0
43755 - 0 0 0 0 0 0 0 0 0 0 0 0
43756 - 0 0 0 0 0 0 0 0 0 0 0 0
43757 - 0 0 0 0 0 0 0 0 0 0 0 0
43758 - 0 0 0 0 0 0 0 0 0 0 0 0
43759 - 0 0 0 0 0 0 0 0 0 14 14 14
43760 - 46 46 46 86 86 86 2 2 6 54 54 54
43761 -218 218 218 195 195 195 226 226 226 246 246 246
43762 - 58 58 58 2 2 6 2 2 6 30 30 30
43763 -210 210 210 253 253 253 174 174 174 123 123 123
43764 -221 221 221 234 234 234 74 74 74 2 2 6
43765 - 2 2 6 2 2 6 2 2 6 2 2 6
43766 - 70 70 70 58 58 58 22 22 22 6 6 6
43767 - 0 0 0 0 0 0 0 0 0 0 0 0
43768 - 0 0 0 0 0 0 0 0 0 0 0 0
43769 - 0 0 0 0 0 0 0 0 0 0 0 0
43770 - 0 0 0 0 0 0 0 0 0 0 0 0
43771 - 0 0 0 0 0 0 0 0 0 0 0 0
43772 - 0 0 0 0 0 0 0 0 0 0 0 0
43773 - 0 0 0 0 0 0 0 0 0 0 0 0
43774 - 0 0 0 0 0 0 0 0 0 0 0 0
43775 - 0 0 0 0 0 0 0 0 0 0 0 0
43776 - 0 0 0 0 0 0 0 0 0 0 0 0
43777 - 0 0 0 0 0 0 0 0 0 0 0 0
43778 - 0 0 0 0 0 0 0 0 0 0 0 0
43779 - 0 0 0 0 0 0 0 0 0 14 14 14
43780 - 46 46 46 82 82 82 2 2 6 106 106 106
43781 -170 170 170 26 26 26 86 86 86 226 226 226
43782 -123 123 123 10 10 10 14 14 14 46 46 46
43783 -231 231 231 190 190 190 6 6 6 70 70 70
43784 - 90 90 90 238 238 238 158 158 158 2 2 6
43785 - 2 2 6 2 2 6 2 2 6 2 2 6
43786 - 70 70 70 58 58 58 22 22 22 6 6 6
43787 - 0 0 0 0 0 0 0 0 0 0 0 0
43788 - 0 0 0 0 0 0 0 0 0 0 0 0
43789 - 0 0 0 0 0 0 0 0 0 0 0 0
43790 - 0 0 0 0 0 0 0 0 0 0 0 0
43791 - 0 0 0 0 0 0 0 0 0 0 0 0
43792 - 0 0 0 0 0 0 0 0 0 0 0 0
43793 - 0 0 0 0 0 0 0 0 1 0 0 0
43794 - 0 0 1 0 0 1 0 0 1 0 0 0
43795 - 0 0 0 0 0 0 0 0 0 0 0 0
43796 - 0 0 0 0 0 0 0 0 0 0 0 0
43797 - 0 0 0 0 0 0 0 0 0 0 0 0
43798 - 0 0 0 0 0 0 0 0 0 0 0 0
43799 - 0 0 0 0 0 0 0 0 0 14 14 14
43800 - 42 42 42 86 86 86 6 6 6 116 116 116
43801 -106 106 106 6 6 6 70 70 70 149 149 149
43802 -128 128 128 18 18 18 38 38 38 54 54 54
43803 -221 221 221 106 106 106 2 2 6 14 14 14
43804 - 46 46 46 190 190 190 198 198 198 2 2 6
43805 - 2 2 6 2 2 6 2 2 6 2 2 6
43806 - 74 74 74 62 62 62 22 22 22 6 6 6
43807 - 0 0 0 0 0 0 0 0 0 0 0 0
43808 - 0 0 0 0 0 0 0 0 0 0 0 0
43809 - 0 0 0 0 0 0 0 0 0 0 0 0
43810 - 0 0 0 0 0 0 0 0 0 0 0 0
43811 - 0 0 0 0 0 0 0 0 0 0 0 0
43812 - 0 0 0 0 0 0 0 0 0 0 0 0
43813 - 0 0 0 0 0 0 0 0 1 0 0 0
43814 - 0 0 1 0 0 0 0 0 1 0 0 0
43815 - 0 0 0 0 0 0 0 0 0 0 0 0
43816 - 0 0 0 0 0 0 0 0 0 0 0 0
43817 - 0 0 0 0 0 0 0 0 0 0 0 0
43818 - 0 0 0 0 0 0 0 0 0 0 0 0
43819 - 0 0 0 0 0 0 0 0 0 14 14 14
43820 - 42 42 42 94 94 94 14 14 14 101 101 101
43821 -128 128 128 2 2 6 18 18 18 116 116 116
43822 -118 98 46 121 92 8 121 92 8 98 78 10
43823 -162 162 162 106 106 106 2 2 6 2 2 6
43824 - 2 2 6 195 195 195 195 195 195 6 6 6
43825 - 2 2 6 2 2 6 2 2 6 2 2 6
43826 - 74 74 74 62 62 62 22 22 22 6 6 6
43827 - 0 0 0 0 0 0 0 0 0 0 0 0
43828 - 0 0 0 0 0 0 0 0 0 0 0 0
43829 - 0 0 0 0 0 0 0 0 0 0 0 0
43830 - 0 0 0 0 0 0 0 0 0 0 0 0
43831 - 0 0 0 0 0 0 0 0 0 0 0 0
43832 - 0 0 0 0 0 0 0 0 0 0 0 0
43833 - 0 0 0 0 0 0 0 0 1 0 0 1
43834 - 0 0 1 0 0 0 0 0 1 0 0 0
43835 - 0 0 0 0 0 0 0 0 0 0 0 0
43836 - 0 0 0 0 0 0 0 0 0 0 0 0
43837 - 0 0 0 0 0 0 0 0 0 0 0 0
43838 - 0 0 0 0 0 0 0 0 0 0 0 0
43839 - 0 0 0 0 0 0 0 0 0 10 10 10
43840 - 38 38 38 90 90 90 14 14 14 58 58 58
43841 -210 210 210 26 26 26 54 38 6 154 114 10
43842 -226 170 11 236 186 11 225 175 15 184 144 12
43843 -215 174 15 175 146 61 37 26 9 2 2 6
43844 - 70 70 70 246 246 246 138 138 138 2 2 6
43845 - 2 2 6 2 2 6 2 2 6 2 2 6
43846 - 70 70 70 66 66 66 26 26 26 6 6 6
43847 - 0 0 0 0 0 0 0 0 0 0 0 0
43848 - 0 0 0 0 0 0 0 0 0 0 0 0
43849 - 0 0 0 0 0 0 0 0 0 0 0 0
43850 - 0 0 0 0 0 0 0 0 0 0 0 0
43851 - 0 0 0 0 0 0 0 0 0 0 0 0
43852 - 0 0 0 0 0 0 0 0 0 0 0 0
43853 - 0 0 0 0 0 0 0 0 0 0 0 0
43854 - 0 0 0 0 0 0 0 0 0 0 0 0
43855 - 0 0 0 0 0 0 0 0 0 0 0 0
43856 - 0 0 0 0 0 0 0 0 0 0 0 0
43857 - 0 0 0 0 0 0 0 0 0 0 0 0
43858 - 0 0 0 0 0 0 0 0 0 0 0 0
43859 - 0 0 0 0 0 0 0 0 0 10 10 10
43860 - 38 38 38 86 86 86 14 14 14 10 10 10
43861 -195 195 195 188 164 115 192 133 9 225 175 15
43862 -239 182 13 234 190 10 232 195 16 232 200 30
43863 -245 207 45 241 208 19 232 195 16 184 144 12
43864 -218 194 134 211 206 186 42 42 42 2 2 6
43865 - 2 2 6 2 2 6 2 2 6 2 2 6
43866 - 50 50 50 74 74 74 30 30 30 6 6 6
43867 - 0 0 0 0 0 0 0 0 0 0 0 0
43868 - 0 0 0 0 0 0 0 0 0 0 0 0
43869 - 0 0 0 0 0 0 0 0 0 0 0 0
43870 - 0 0 0 0 0 0 0 0 0 0 0 0
43871 - 0 0 0 0 0 0 0 0 0 0 0 0
43872 - 0 0 0 0 0 0 0 0 0 0 0 0
43873 - 0 0 0 0 0 0 0 0 0 0 0 0
43874 - 0 0 0 0 0 0 0 0 0 0 0 0
43875 - 0 0 0 0 0 0 0 0 0 0 0 0
43876 - 0 0 0 0 0 0 0 0 0 0 0 0
43877 - 0 0 0 0 0 0 0 0 0 0 0 0
43878 - 0 0 0 0 0 0 0 0 0 0 0 0
43879 - 0 0 0 0 0 0 0 0 0 10 10 10
43880 - 34 34 34 86 86 86 14 14 14 2 2 6
43881 -121 87 25 192 133 9 219 162 10 239 182 13
43882 -236 186 11 232 195 16 241 208 19 244 214 54
43883 -246 218 60 246 218 38 246 215 20 241 208 19
43884 -241 208 19 226 184 13 121 87 25 2 2 6
43885 - 2 2 6 2 2 6 2 2 6 2 2 6
43886 - 50 50 50 82 82 82 34 34 34 10 10 10
43887 - 0 0 0 0 0 0 0 0 0 0 0 0
43888 - 0 0 0 0 0 0 0 0 0 0 0 0
43889 - 0 0 0 0 0 0 0 0 0 0 0 0
43890 - 0 0 0 0 0 0 0 0 0 0 0 0
43891 - 0 0 0 0 0 0 0 0 0 0 0 0
43892 - 0 0 0 0 0 0 0 0 0 0 0 0
43893 - 0 0 0 0 0 0 0 0 0 0 0 0
43894 - 0 0 0 0 0 0 0 0 0 0 0 0
43895 - 0 0 0 0 0 0 0 0 0 0 0 0
43896 - 0 0 0 0 0 0 0 0 0 0 0 0
43897 - 0 0 0 0 0 0 0 0 0 0 0 0
43898 - 0 0 0 0 0 0 0 0 0 0 0 0
43899 - 0 0 0 0 0 0 0 0 0 10 10 10
43900 - 34 34 34 82 82 82 30 30 30 61 42 6
43901 -180 123 7 206 145 10 230 174 11 239 182 13
43902 -234 190 10 238 202 15 241 208 19 246 218 74
43903 -246 218 38 246 215 20 246 215 20 246 215 20
43904 -226 184 13 215 174 15 184 144 12 6 6 6
43905 - 2 2 6 2 2 6 2 2 6 2 2 6
43906 - 26 26 26 94 94 94 42 42 42 14 14 14
43907 - 0 0 0 0 0 0 0 0 0 0 0 0
43908 - 0 0 0 0 0 0 0 0 0 0 0 0
43909 - 0 0 0 0 0 0 0 0 0 0 0 0
43910 - 0 0 0 0 0 0 0 0 0 0 0 0
43911 - 0 0 0 0 0 0 0 0 0 0 0 0
43912 - 0 0 0 0 0 0 0 0 0 0 0 0
43913 - 0 0 0 0 0 0 0 0 0 0 0 0
43914 - 0 0 0 0 0 0 0 0 0 0 0 0
43915 - 0 0 0 0 0 0 0 0 0 0 0 0
43916 - 0 0 0 0 0 0 0 0 0 0 0 0
43917 - 0 0 0 0 0 0 0 0 0 0 0 0
43918 - 0 0 0 0 0 0 0 0 0 0 0 0
43919 - 0 0 0 0 0 0 0 0 0 10 10 10
43920 - 30 30 30 78 78 78 50 50 50 104 69 6
43921 -192 133 9 216 158 10 236 178 12 236 186 11
43922 -232 195 16 241 208 19 244 214 54 245 215 43
43923 -246 215 20 246 215 20 241 208 19 198 155 10
43924 -200 144 11 216 158 10 156 118 10 2 2 6
43925 - 2 2 6 2 2 6 2 2 6 2 2 6
43926 - 6 6 6 90 90 90 54 54 54 18 18 18
43927 - 6 6 6 0 0 0 0 0 0 0 0 0
43928 - 0 0 0 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 0 0 0
43931 - 0 0 0 0 0 0 0 0 0 0 0 0
43932 - 0 0 0 0 0 0 0 0 0 0 0 0
43933 - 0 0 0 0 0 0 0 0 0 0 0 0
43934 - 0 0 0 0 0 0 0 0 0 0 0 0
43935 - 0 0 0 0 0 0 0 0 0 0 0 0
43936 - 0 0 0 0 0 0 0 0 0 0 0 0
43937 - 0 0 0 0 0 0 0 0 0 0 0 0
43938 - 0 0 0 0 0 0 0 0 0 0 0 0
43939 - 0 0 0 0 0 0 0 0 0 10 10 10
43940 - 30 30 30 78 78 78 46 46 46 22 22 22
43941 -137 92 6 210 162 10 239 182 13 238 190 10
43942 -238 202 15 241 208 19 246 215 20 246 215 20
43943 -241 208 19 203 166 17 185 133 11 210 150 10
43944 -216 158 10 210 150 10 102 78 10 2 2 6
43945 - 6 6 6 54 54 54 14 14 14 2 2 6
43946 - 2 2 6 62 62 62 74 74 74 30 30 30
43947 - 10 10 10 0 0 0 0 0 0 0 0 0
43948 - 0 0 0 0 0 0 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 0 0 0 0 0 0 0 0 0
43951 - 0 0 0 0 0 0 0 0 0 0 0 0
43952 - 0 0 0 0 0 0 0 0 0 0 0 0
43953 - 0 0 0 0 0 0 0 0 0 0 0 0
43954 - 0 0 0 0 0 0 0 0 0 0 0 0
43955 - 0 0 0 0 0 0 0 0 0 0 0 0
43956 - 0 0 0 0 0 0 0 0 0 0 0 0
43957 - 0 0 0 0 0 0 0 0 0 0 0 0
43958 - 0 0 0 0 0 0 0 0 0 0 0 0
43959 - 0 0 0 0 0 0 0 0 0 10 10 10
43960 - 34 34 34 78 78 78 50 50 50 6 6 6
43961 - 94 70 30 139 102 15 190 146 13 226 184 13
43962 -232 200 30 232 195 16 215 174 15 190 146 13
43963 -168 122 10 192 133 9 210 150 10 213 154 11
43964 -202 150 34 182 157 106 101 98 89 2 2 6
43965 - 2 2 6 78 78 78 116 116 116 58 58 58
43966 - 2 2 6 22 22 22 90 90 90 46 46 46
43967 - 18 18 18 6 6 6 0 0 0 0 0 0
43968 - 0 0 0 0 0 0 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 0 0 0 0 0 0 0 0 0 0 0 0
43971 - 0 0 0 0 0 0 0 0 0 0 0 0
43972 - 0 0 0 0 0 0 0 0 0 0 0 0
43973 - 0 0 0 0 0 0 0 0 0 0 0 0
43974 - 0 0 0 0 0 0 0 0 0 0 0 0
43975 - 0 0 0 0 0 0 0 0 0 0 0 0
43976 - 0 0 0 0 0 0 0 0 0 0 0 0
43977 - 0 0 0 0 0 0 0 0 0 0 0 0
43978 - 0 0 0 0 0 0 0 0 0 0 0 0
43979 - 0 0 0 0 0 0 0 0 0 10 10 10
43980 - 38 38 38 86 86 86 50 50 50 6 6 6
43981 -128 128 128 174 154 114 156 107 11 168 122 10
43982 -198 155 10 184 144 12 197 138 11 200 144 11
43983 -206 145 10 206 145 10 197 138 11 188 164 115
43984 -195 195 195 198 198 198 174 174 174 14 14 14
43985 - 2 2 6 22 22 22 116 116 116 116 116 116
43986 - 22 22 22 2 2 6 74 74 74 70 70 70
43987 - 30 30 30 10 10 10 0 0 0 0 0 0
43988 - 0 0 0 0 0 0 0 0 0 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 0 0 0
43990 - 0 0 0 0 0 0 0 0 0 0 0 0
43991 - 0 0 0 0 0 0 0 0 0 0 0 0
43992 - 0 0 0 0 0 0 0 0 0 0 0 0
43993 - 0 0 0 0 0 0 0 0 0 0 0 0
43994 - 0 0 0 0 0 0 0 0 0 0 0 0
43995 - 0 0 0 0 0 0 0 0 0 0 0 0
43996 - 0 0 0 0 0 0 0 0 0 0 0 0
43997 - 0 0 0 0 0 0 0 0 0 0 0 0
43998 - 0 0 0 0 0 0 0 0 0 0 0 0
43999 - 0 0 0 0 0 0 6 6 6 18 18 18
44000 - 50 50 50 101 101 101 26 26 26 10 10 10
44001 -138 138 138 190 190 190 174 154 114 156 107 11
44002 -197 138 11 200 144 11 197 138 11 192 133 9
44003 -180 123 7 190 142 34 190 178 144 187 187 187
44004 -202 202 202 221 221 221 214 214 214 66 66 66
44005 - 2 2 6 2 2 6 50 50 50 62 62 62
44006 - 6 6 6 2 2 6 10 10 10 90 90 90
44007 - 50 50 50 18 18 18 6 6 6 0 0 0
44008 - 0 0 0 0 0 0 0 0 0 0 0 0
44009 - 0 0 0 0 0 0 0 0 0 0 0 0
44010 - 0 0 0 0 0 0 0 0 0 0 0 0
44011 - 0 0 0 0 0 0 0 0 0 0 0 0
44012 - 0 0 0 0 0 0 0 0 0 0 0 0
44013 - 0 0 0 0 0 0 0 0 0 0 0 0
44014 - 0 0 0 0 0 0 0 0 0 0 0 0
44015 - 0 0 0 0 0 0 0 0 0 0 0 0
44016 - 0 0 0 0 0 0 0 0 0 0 0 0
44017 - 0 0 0 0 0 0 0 0 0 0 0 0
44018 - 0 0 0 0 0 0 0 0 0 0 0 0
44019 - 0 0 0 0 0 0 10 10 10 34 34 34
44020 - 74 74 74 74 74 74 2 2 6 6 6 6
44021 -144 144 144 198 198 198 190 190 190 178 166 146
44022 -154 121 60 156 107 11 156 107 11 168 124 44
44023 -174 154 114 187 187 187 190 190 190 210 210 210
44024 -246 246 246 253 253 253 253 253 253 182 182 182
44025 - 6 6 6 2 2 6 2 2 6 2 2 6
44026 - 2 2 6 2 2 6 2 2 6 62 62 62
44027 - 74 74 74 34 34 34 14 14 14 0 0 0
44028 - 0 0 0 0 0 0 0 0 0 0 0 0
44029 - 0 0 0 0 0 0 0 0 0 0 0 0
44030 - 0 0 0 0 0 0 0 0 0 0 0 0
44031 - 0 0 0 0 0 0 0 0 0 0 0 0
44032 - 0 0 0 0 0 0 0 0 0 0 0 0
44033 - 0 0 0 0 0 0 0 0 0 0 0 0
44034 - 0 0 0 0 0 0 0 0 0 0 0 0
44035 - 0 0 0 0 0 0 0 0 0 0 0 0
44036 - 0 0 0 0 0 0 0 0 0 0 0 0
44037 - 0 0 0 0 0 0 0 0 0 0 0 0
44038 - 0 0 0 0 0 0 0 0 0 0 0 0
44039 - 0 0 0 10 10 10 22 22 22 54 54 54
44040 - 94 94 94 18 18 18 2 2 6 46 46 46
44041 -234 234 234 221 221 221 190 190 190 190 190 190
44042 -190 190 190 187 187 187 187 187 187 190 190 190
44043 -190 190 190 195 195 195 214 214 214 242 242 242
44044 -253 253 253 253 253 253 253 253 253 253 253 253
44045 - 82 82 82 2 2 6 2 2 6 2 2 6
44046 - 2 2 6 2 2 6 2 2 6 14 14 14
44047 - 86 86 86 54 54 54 22 22 22 6 6 6
44048 - 0 0 0 0 0 0 0 0 0 0 0 0
44049 - 0 0 0 0 0 0 0 0 0 0 0 0
44050 - 0 0 0 0 0 0 0 0 0 0 0 0
44051 - 0 0 0 0 0 0 0 0 0 0 0 0
44052 - 0 0 0 0 0 0 0 0 0 0 0 0
44053 - 0 0 0 0 0 0 0 0 0 0 0 0
44054 - 0 0 0 0 0 0 0 0 0 0 0 0
44055 - 0 0 0 0 0 0 0 0 0 0 0 0
44056 - 0 0 0 0 0 0 0 0 0 0 0 0
44057 - 0 0 0 0 0 0 0 0 0 0 0 0
44058 - 0 0 0 0 0 0 0 0 0 0 0 0
44059 - 6 6 6 18 18 18 46 46 46 90 90 90
44060 - 46 46 46 18 18 18 6 6 6 182 182 182
44061 -253 253 253 246 246 246 206 206 206 190 190 190
44062 -190 190 190 190 190 190 190 190 190 190 190 190
44063 -206 206 206 231 231 231 250 250 250 253 253 253
44064 -253 253 253 253 253 253 253 253 253 253 253 253
44065 -202 202 202 14 14 14 2 2 6 2 2 6
44066 - 2 2 6 2 2 6 2 2 6 2 2 6
44067 - 42 42 42 86 86 86 42 42 42 18 18 18
44068 - 6 6 6 0 0 0 0 0 0 0 0 0
44069 - 0 0 0 0 0 0 0 0 0 0 0 0
44070 - 0 0 0 0 0 0 0 0 0 0 0 0
44071 - 0 0 0 0 0 0 0 0 0 0 0 0
44072 - 0 0 0 0 0 0 0 0 0 0 0 0
44073 - 0 0 0 0 0 0 0 0 0 0 0 0
44074 - 0 0 0 0 0 0 0 0 0 0 0 0
44075 - 0 0 0 0 0 0 0 0 0 0 0 0
44076 - 0 0 0 0 0 0 0 0 0 0 0 0
44077 - 0 0 0 0 0 0 0 0 0 0 0 0
44078 - 0 0 0 0 0 0 0 0 0 6 6 6
44079 - 14 14 14 38 38 38 74 74 74 66 66 66
44080 - 2 2 6 6 6 6 90 90 90 250 250 250
44081 -253 253 253 253 253 253 238 238 238 198 198 198
44082 -190 190 190 190 190 190 195 195 195 221 221 221
44083 -246 246 246 253 253 253 253 253 253 253 253 253
44084 -253 253 253 253 253 253 253 253 253 253 253 253
44085 -253 253 253 82 82 82 2 2 6 2 2 6
44086 - 2 2 6 2 2 6 2 2 6 2 2 6
44087 - 2 2 6 78 78 78 70 70 70 34 34 34
44088 - 14 14 14 6 6 6 0 0 0 0 0 0
44089 - 0 0 0 0 0 0 0 0 0 0 0 0
44090 - 0 0 0 0 0 0 0 0 0 0 0 0
44091 - 0 0 0 0 0 0 0 0 0 0 0 0
44092 - 0 0 0 0 0 0 0 0 0 0 0 0
44093 - 0 0 0 0 0 0 0 0 0 0 0 0
44094 - 0 0 0 0 0 0 0 0 0 0 0 0
44095 - 0 0 0 0 0 0 0 0 0 0 0 0
44096 - 0 0 0 0 0 0 0 0 0 0 0 0
44097 - 0 0 0 0 0 0 0 0 0 0 0 0
44098 - 0 0 0 0 0 0 0 0 0 14 14 14
44099 - 34 34 34 66 66 66 78 78 78 6 6 6
44100 - 2 2 6 18 18 18 218 218 218 253 253 253
44101 -253 253 253 253 253 253 253 253 253 246 246 246
44102 -226 226 226 231 231 231 246 246 246 253 253 253
44103 -253 253 253 253 253 253 253 253 253 253 253 253
44104 -253 253 253 253 253 253 253 253 253 253 253 253
44105 -253 253 253 178 178 178 2 2 6 2 2 6
44106 - 2 2 6 2 2 6 2 2 6 2 2 6
44107 - 2 2 6 18 18 18 90 90 90 62 62 62
44108 - 30 30 30 10 10 10 0 0 0 0 0 0
44109 - 0 0 0 0 0 0 0 0 0 0 0 0
44110 - 0 0 0 0 0 0 0 0 0 0 0 0
44111 - 0 0 0 0 0 0 0 0 0 0 0 0
44112 - 0 0 0 0 0 0 0 0 0 0 0 0
44113 - 0 0 0 0 0 0 0 0 0 0 0 0
44114 - 0 0 0 0 0 0 0 0 0 0 0 0
44115 - 0 0 0 0 0 0 0 0 0 0 0 0
44116 - 0 0 0 0 0 0 0 0 0 0 0 0
44117 - 0 0 0 0 0 0 0 0 0 0 0 0
44118 - 0 0 0 0 0 0 10 10 10 26 26 26
44119 - 58 58 58 90 90 90 18 18 18 2 2 6
44120 - 2 2 6 110 110 110 253 253 253 253 253 253
44121 -253 253 253 253 253 253 253 253 253 253 253 253
44122 -250 250 250 253 253 253 253 253 253 253 253 253
44123 -253 253 253 253 253 253 253 253 253 253 253 253
44124 -253 253 253 253 253 253 253 253 253 253 253 253
44125 -253 253 253 231 231 231 18 18 18 2 2 6
44126 - 2 2 6 2 2 6 2 2 6 2 2 6
44127 - 2 2 6 2 2 6 18 18 18 94 94 94
44128 - 54 54 54 26 26 26 10 10 10 0 0 0
44129 - 0 0 0 0 0 0 0 0 0 0 0 0
44130 - 0 0 0 0 0 0 0 0 0 0 0 0
44131 - 0 0 0 0 0 0 0 0 0 0 0 0
44132 - 0 0 0 0 0 0 0 0 0 0 0 0
44133 - 0 0 0 0 0 0 0 0 0 0 0 0
44134 - 0 0 0 0 0 0 0 0 0 0 0 0
44135 - 0 0 0 0 0 0 0 0 0 0 0 0
44136 - 0 0 0 0 0 0 0 0 0 0 0 0
44137 - 0 0 0 0 0 0 0 0 0 0 0 0
44138 - 0 0 0 6 6 6 22 22 22 50 50 50
44139 - 90 90 90 26 26 26 2 2 6 2 2 6
44140 - 14 14 14 195 195 195 250 250 250 253 253 253
44141 -253 253 253 253 253 253 253 253 253 253 253 253
44142 -253 253 253 253 253 253 253 253 253 253 253 253
44143 -253 253 253 253 253 253 253 253 253 253 253 253
44144 -253 253 253 253 253 253 253 253 253 253 253 253
44145 -250 250 250 242 242 242 54 54 54 2 2 6
44146 - 2 2 6 2 2 6 2 2 6 2 2 6
44147 - 2 2 6 2 2 6 2 2 6 38 38 38
44148 - 86 86 86 50 50 50 22 22 22 6 6 6
44149 - 0 0 0 0 0 0 0 0 0 0 0 0
44150 - 0 0 0 0 0 0 0 0 0 0 0 0
44151 - 0 0 0 0 0 0 0 0 0 0 0 0
44152 - 0 0 0 0 0 0 0 0 0 0 0 0
44153 - 0 0 0 0 0 0 0 0 0 0 0 0
44154 - 0 0 0 0 0 0 0 0 0 0 0 0
44155 - 0 0 0 0 0 0 0 0 0 0 0 0
44156 - 0 0 0 0 0 0 0 0 0 0 0 0
44157 - 0 0 0 0 0 0 0 0 0 0 0 0
44158 - 6 6 6 14 14 14 38 38 38 82 82 82
44159 - 34 34 34 2 2 6 2 2 6 2 2 6
44160 - 42 42 42 195 195 195 246 246 246 253 253 253
44161 -253 253 253 253 253 253 253 253 253 250 250 250
44162 -242 242 242 242 242 242 250 250 250 253 253 253
44163 -253 253 253 253 253 253 253 253 253 253 253 253
44164 -253 253 253 250 250 250 246 246 246 238 238 238
44165 -226 226 226 231 231 231 101 101 101 6 6 6
44166 - 2 2 6 2 2 6 2 2 6 2 2 6
44167 - 2 2 6 2 2 6 2 2 6 2 2 6
44168 - 38 38 38 82 82 82 42 42 42 14 14 14
44169 - 6 6 6 0 0 0 0 0 0 0 0 0
44170 - 0 0 0 0 0 0 0 0 0 0 0 0
44171 - 0 0 0 0 0 0 0 0 0 0 0 0
44172 - 0 0 0 0 0 0 0 0 0 0 0 0
44173 - 0 0 0 0 0 0 0 0 0 0 0 0
44174 - 0 0 0 0 0 0 0 0 0 0 0 0
44175 - 0 0 0 0 0 0 0 0 0 0 0 0
44176 - 0 0 0 0 0 0 0 0 0 0 0 0
44177 - 0 0 0 0 0 0 0 0 0 0 0 0
44178 - 10 10 10 26 26 26 62 62 62 66 66 66
44179 - 2 2 6 2 2 6 2 2 6 6 6 6
44180 - 70 70 70 170 170 170 206 206 206 234 234 234
44181 -246 246 246 250 250 250 250 250 250 238 238 238
44182 -226 226 226 231 231 231 238 238 238 250 250 250
44183 -250 250 250 250 250 250 246 246 246 231 231 231
44184 -214 214 214 206 206 206 202 202 202 202 202 202
44185 -198 198 198 202 202 202 182 182 182 18 18 18
44186 - 2 2 6 2 2 6 2 2 6 2 2 6
44187 - 2 2 6 2 2 6 2 2 6 2 2 6
44188 - 2 2 6 62 62 62 66 66 66 30 30 30
44189 - 10 10 10 0 0 0 0 0 0 0 0 0
44190 - 0 0 0 0 0 0 0 0 0 0 0 0
44191 - 0 0 0 0 0 0 0 0 0 0 0 0
44192 - 0 0 0 0 0 0 0 0 0 0 0 0
44193 - 0 0 0 0 0 0 0 0 0 0 0 0
44194 - 0 0 0 0 0 0 0 0 0 0 0 0
44195 - 0 0 0 0 0 0 0 0 0 0 0 0
44196 - 0 0 0 0 0 0 0 0 0 0 0 0
44197 - 0 0 0 0 0 0 0 0 0 0 0 0
44198 - 14 14 14 42 42 42 82 82 82 18 18 18
44199 - 2 2 6 2 2 6 2 2 6 10 10 10
44200 - 94 94 94 182 182 182 218 218 218 242 242 242
44201 -250 250 250 253 253 253 253 253 253 250 250 250
44202 -234 234 234 253 253 253 253 253 253 253 253 253
44203 -253 253 253 253 253 253 253 253 253 246 246 246
44204 -238 238 238 226 226 226 210 210 210 202 202 202
44205 -195 195 195 195 195 195 210 210 210 158 158 158
44206 - 6 6 6 14 14 14 50 50 50 14 14 14
44207 - 2 2 6 2 2 6 2 2 6 2 2 6
44208 - 2 2 6 6 6 6 86 86 86 46 46 46
44209 - 18 18 18 6 6 6 0 0 0 0 0 0
44210 - 0 0 0 0 0 0 0 0 0 0 0 0
44211 - 0 0 0 0 0 0 0 0 0 0 0 0
44212 - 0 0 0 0 0 0 0 0 0 0 0 0
44213 - 0 0 0 0 0 0 0 0 0 0 0 0
44214 - 0 0 0 0 0 0 0 0 0 0 0 0
44215 - 0 0 0 0 0 0 0 0 0 0 0 0
44216 - 0 0 0 0 0 0 0 0 0 0 0 0
44217 - 0 0 0 0 0 0 0 0 0 6 6 6
44218 - 22 22 22 54 54 54 70 70 70 2 2 6
44219 - 2 2 6 10 10 10 2 2 6 22 22 22
44220 -166 166 166 231 231 231 250 250 250 253 253 253
44221 -253 253 253 253 253 253 253 253 253 250 250 250
44222 -242 242 242 253 253 253 253 253 253 253 253 253
44223 -253 253 253 253 253 253 253 253 253 253 253 253
44224 -253 253 253 253 253 253 253 253 253 246 246 246
44225 -231 231 231 206 206 206 198 198 198 226 226 226
44226 - 94 94 94 2 2 6 6 6 6 38 38 38
44227 - 30 30 30 2 2 6 2 2 6 2 2 6
44228 - 2 2 6 2 2 6 62 62 62 66 66 66
44229 - 26 26 26 10 10 10 0 0 0 0 0 0
44230 - 0 0 0 0 0 0 0 0 0 0 0 0
44231 - 0 0 0 0 0 0 0 0 0 0 0 0
44232 - 0 0 0 0 0 0 0 0 0 0 0 0
44233 - 0 0 0 0 0 0 0 0 0 0 0 0
44234 - 0 0 0 0 0 0 0 0 0 0 0 0
44235 - 0 0 0 0 0 0 0 0 0 0 0 0
44236 - 0 0 0 0 0 0 0 0 0 0 0 0
44237 - 0 0 0 0 0 0 0 0 0 10 10 10
44238 - 30 30 30 74 74 74 50 50 50 2 2 6
44239 - 26 26 26 26 26 26 2 2 6 106 106 106
44240 -238 238 238 253 253 253 253 253 253 253 253 253
44241 -253 253 253 253 253 253 253 253 253 253 253 253
44242 -253 253 253 253 253 253 253 253 253 253 253 253
44243 -253 253 253 253 253 253 253 253 253 253 253 253
44244 -253 253 253 253 253 253 253 253 253 253 253 253
44245 -253 253 253 246 246 246 218 218 218 202 202 202
44246 -210 210 210 14 14 14 2 2 6 2 2 6
44247 - 30 30 30 22 22 22 2 2 6 2 2 6
44248 - 2 2 6 2 2 6 18 18 18 86 86 86
44249 - 42 42 42 14 14 14 0 0 0 0 0 0
44250 - 0 0 0 0 0 0 0 0 0 0 0 0
44251 - 0 0 0 0 0 0 0 0 0 0 0 0
44252 - 0 0 0 0 0 0 0 0 0 0 0 0
44253 - 0 0 0 0 0 0 0 0 0 0 0 0
44254 - 0 0 0 0 0 0 0 0 0 0 0 0
44255 - 0 0 0 0 0 0 0 0 0 0 0 0
44256 - 0 0 0 0 0 0 0 0 0 0 0 0
44257 - 0 0 0 0 0 0 0 0 0 14 14 14
44258 - 42 42 42 90 90 90 22 22 22 2 2 6
44259 - 42 42 42 2 2 6 18 18 18 218 218 218
44260 -253 253 253 253 253 253 253 253 253 253 253 253
44261 -253 253 253 253 253 253 253 253 253 253 253 253
44262 -253 253 253 253 253 253 253 253 253 253 253 253
44263 -253 253 253 253 253 253 253 253 253 253 253 253
44264 -253 253 253 253 253 253 253 253 253 253 253 253
44265 -253 253 253 253 253 253 250 250 250 221 221 221
44266 -218 218 218 101 101 101 2 2 6 14 14 14
44267 - 18 18 18 38 38 38 10 10 10 2 2 6
44268 - 2 2 6 2 2 6 2 2 6 78 78 78
44269 - 58 58 58 22 22 22 6 6 6 0 0 0
44270 - 0 0 0 0 0 0 0 0 0 0 0 0
44271 - 0 0 0 0 0 0 0 0 0 0 0 0
44272 - 0 0 0 0 0 0 0 0 0 0 0 0
44273 - 0 0 0 0 0 0 0 0 0 0 0 0
44274 - 0 0 0 0 0 0 0 0 0 0 0 0
44275 - 0 0 0 0 0 0 0 0 0 0 0 0
44276 - 0 0 0 0 0 0 0 0 0 0 0 0
44277 - 0 0 0 0 0 0 6 6 6 18 18 18
44278 - 54 54 54 82 82 82 2 2 6 26 26 26
44279 - 22 22 22 2 2 6 123 123 123 253 253 253
44280 -253 253 253 253 253 253 253 253 253 253 253 253
44281 -253 253 253 253 253 253 253 253 253 253 253 253
44282 -253 253 253 253 253 253 253 253 253 253 253 253
44283 -253 253 253 253 253 253 253 253 253 253 253 253
44284 -253 253 253 253 253 253 253 253 253 253 253 253
44285 -253 253 253 253 253 253 253 253 253 250 250 250
44286 -238 238 238 198 198 198 6 6 6 38 38 38
44287 - 58 58 58 26 26 26 38 38 38 2 2 6
44288 - 2 2 6 2 2 6 2 2 6 46 46 46
44289 - 78 78 78 30 30 30 10 10 10 0 0 0
44290 - 0 0 0 0 0 0 0 0 0 0 0 0
44291 - 0 0 0 0 0 0 0 0 0 0 0 0
44292 - 0 0 0 0 0 0 0 0 0 0 0 0
44293 - 0 0 0 0 0 0 0 0 0 0 0 0
44294 - 0 0 0 0 0 0 0 0 0 0 0 0
44295 - 0 0 0 0 0 0 0 0 0 0 0 0
44296 - 0 0 0 0 0 0 0 0 0 0 0 0
44297 - 0 0 0 0 0 0 10 10 10 30 30 30
44298 - 74 74 74 58 58 58 2 2 6 42 42 42
44299 - 2 2 6 22 22 22 231 231 231 253 253 253
44300 -253 253 253 253 253 253 253 253 253 253 253 253
44301 -253 253 253 253 253 253 253 253 253 250 250 250
44302 -253 253 253 253 253 253 253 253 253 253 253 253
44303 -253 253 253 253 253 253 253 253 253 253 253 253
44304 -253 253 253 253 253 253 253 253 253 253 253 253
44305 -253 253 253 253 253 253 253 253 253 253 253 253
44306 -253 253 253 246 246 246 46 46 46 38 38 38
44307 - 42 42 42 14 14 14 38 38 38 14 14 14
44308 - 2 2 6 2 2 6 2 2 6 6 6 6
44309 - 86 86 86 46 46 46 14 14 14 0 0 0
44310 - 0 0 0 0 0 0 0 0 0 0 0 0
44311 - 0 0 0 0 0 0 0 0 0 0 0 0
44312 - 0 0 0 0 0 0 0 0 0 0 0 0
44313 - 0 0 0 0 0 0 0 0 0 0 0 0
44314 - 0 0 0 0 0 0 0 0 0 0 0 0
44315 - 0 0 0 0 0 0 0 0 0 0 0 0
44316 - 0 0 0 0 0 0 0 0 0 0 0 0
44317 - 0 0 0 6 6 6 14 14 14 42 42 42
44318 - 90 90 90 18 18 18 18 18 18 26 26 26
44319 - 2 2 6 116 116 116 253 253 253 253 253 253
44320 -253 253 253 253 253 253 253 253 253 253 253 253
44321 -253 253 253 253 253 253 250 250 250 238 238 238
44322 -253 253 253 253 253 253 253 253 253 253 253 253
44323 -253 253 253 253 253 253 253 253 253 253 253 253
44324 -253 253 253 253 253 253 253 253 253 253 253 253
44325 -253 253 253 253 253 253 253 253 253 253 253 253
44326 -253 253 253 253 253 253 94 94 94 6 6 6
44327 - 2 2 6 2 2 6 10 10 10 34 34 34
44328 - 2 2 6 2 2 6 2 2 6 2 2 6
44329 - 74 74 74 58 58 58 22 22 22 6 6 6
44330 - 0 0 0 0 0 0 0 0 0 0 0 0
44331 - 0 0 0 0 0 0 0 0 0 0 0 0
44332 - 0 0 0 0 0 0 0 0 0 0 0 0
44333 - 0 0 0 0 0 0 0 0 0 0 0 0
44334 - 0 0 0 0 0 0 0 0 0 0 0 0
44335 - 0 0 0 0 0 0 0 0 0 0 0 0
44336 - 0 0 0 0 0 0 0 0 0 0 0 0
44337 - 0 0 0 10 10 10 26 26 26 66 66 66
44338 - 82 82 82 2 2 6 38 38 38 6 6 6
44339 - 14 14 14 210 210 210 253 253 253 253 253 253
44340 -253 253 253 253 253 253 253 253 253 253 253 253
44341 -253 253 253 253 253 253 246 246 246 242 242 242
44342 -253 253 253 253 253 253 253 253 253 253 253 253
44343 -253 253 253 253 253 253 253 253 253 253 253 253
44344 -253 253 253 253 253 253 253 253 253 253 253 253
44345 -253 253 253 253 253 253 253 253 253 253 253 253
44346 -253 253 253 253 253 253 144 144 144 2 2 6
44347 - 2 2 6 2 2 6 2 2 6 46 46 46
44348 - 2 2 6 2 2 6 2 2 6 2 2 6
44349 - 42 42 42 74 74 74 30 30 30 10 10 10
44350 - 0 0 0 0 0 0 0 0 0 0 0 0
44351 - 0 0 0 0 0 0 0 0 0 0 0 0
44352 - 0 0 0 0 0 0 0 0 0 0 0 0
44353 - 0 0 0 0 0 0 0 0 0 0 0 0
44354 - 0 0 0 0 0 0 0 0 0 0 0 0
44355 - 0 0 0 0 0 0 0 0 0 0 0 0
44356 - 0 0 0 0 0 0 0 0 0 0 0 0
44357 - 6 6 6 14 14 14 42 42 42 90 90 90
44358 - 26 26 26 6 6 6 42 42 42 2 2 6
44359 - 74 74 74 250 250 250 253 253 253 253 253 253
44360 -253 253 253 253 253 253 253 253 253 253 253 253
44361 -253 253 253 253 253 253 242 242 242 242 242 242
44362 -253 253 253 253 253 253 253 253 253 253 253 253
44363 -253 253 253 253 253 253 253 253 253 253 253 253
44364 -253 253 253 253 253 253 253 253 253 253 253 253
44365 -253 253 253 253 253 253 253 253 253 253 253 253
44366 -253 253 253 253 253 253 182 182 182 2 2 6
44367 - 2 2 6 2 2 6 2 2 6 46 46 46
44368 - 2 2 6 2 2 6 2 2 6 2 2 6
44369 - 10 10 10 86 86 86 38 38 38 10 10 10
44370 - 0 0 0 0 0 0 0 0 0 0 0 0
44371 - 0 0 0 0 0 0 0 0 0 0 0 0
44372 - 0 0 0 0 0 0 0 0 0 0 0 0
44373 - 0 0 0 0 0 0 0 0 0 0 0 0
44374 - 0 0 0 0 0 0 0 0 0 0 0 0
44375 - 0 0 0 0 0 0 0 0 0 0 0 0
44376 - 0 0 0 0 0 0 0 0 0 0 0 0
44377 - 10 10 10 26 26 26 66 66 66 82 82 82
44378 - 2 2 6 22 22 22 18 18 18 2 2 6
44379 -149 149 149 253 253 253 253 253 253 253 253 253
44380 -253 253 253 253 253 253 253 253 253 253 253 253
44381 -253 253 253 253 253 253 234 234 234 242 242 242
44382 -253 253 253 253 253 253 253 253 253 253 253 253
44383 -253 253 253 253 253 253 253 253 253 253 253 253
44384 -253 253 253 253 253 253 253 253 253 253 253 253
44385 -253 253 253 253 253 253 253 253 253 253 253 253
44386 -253 253 253 253 253 253 206 206 206 2 2 6
44387 - 2 2 6 2 2 6 2 2 6 38 38 38
44388 - 2 2 6 2 2 6 2 2 6 2 2 6
44389 - 6 6 6 86 86 86 46 46 46 14 14 14
44390 - 0 0 0 0 0 0 0 0 0 0 0 0
44391 - 0 0 0 0 0 0 0 0 0 0 0 0
44392 - 0 0 0 0 0 0 0 0 0 0 0 0
44393 - 0 0 0 0 0 0 0 0 0 0 0 0
44394 - 0 0 0 0 0 0 0 0 0 0 0 0
44395 - 0 0 0 0 0 0 0 0 0 0 0 0
44396 - 0 0 0 0 0 0 0 0 0 6 6 6
44397 - 18 18 18 46 46 46 86 86 86 18 18 18
44398 - 2 2 6 34 34 34 10 10 10 6 6 6
44399 -210 210 210 253 253 253 253 253 253 253 253 253
44400 -253 253 253 253 253 253 253 253 253 253 253 253
44401 -253 253 253 253 253 253 234 234 234 242 242 242
44402 -253 253 253 253 253 253 253 253 253 253 253 253
44403 -253 253 253 253 253 253 253 253 253 253 253 253
44404 -253 253 253 253 253 253 253 253 253 253 253 253
44405 -253 253 253 253 253 253 253 253 253 253 253 253
44406 -253 253 253 253 253 253 221 221 221 6 6 6
44407 - 2 2 6 2 2 6 6 6 6 30 30 30
44408 - 2 2 6 2 2 6 2 2 6 2 2 6
44409 - 2 2 6 82 82 82 54 54 54 18 18 18
44410 - 6 6 6 0 0 0 0 0 0 0 0 0
44411 - 0 0 0 0 0 0 0 0 0 0 0 0
44412 - 0 0 0 0 0 0 0 0 0 0 0 0
44413 - 0 0 0 0 0 0 0 0 0 0 0 0
44414 - 0 0 0 0 0 0 0 0 0 0 0 0
44415 - 0 0 0 0 0 0 0 0 0 0 0 0
44416 - 0 0 0 0 0 0 0 0 0 10 10 10
44417 - 26 26 26 66 66 66 62 62 62 2 2 6
44418 - 2 2 6 38 38 38 10 10 10 26 26 26
44419 -238 238 238 253 253 253 253 253 253 253 253 253
44420 -253 253 253 253 253 253 253 253 253 253 253 253
44421 -253 253 253 253 253 253 231 231 231 238 238 238
44422 -253 253 253 253 253 253 253 253 253 253 253 253
44423 -253 253 253 253 253 253 253 253 253 253 253 253
44424 -253 253 253 253 253 253 253 253 253 253 253 253
44425 -253 253 253 253 253 253 253 253 253 253 253 253
44426 -253 253 253 253 253 253 231 231 231 6 6 6
44427 - 2 2 6 2 2 6 10 10 10 30 30 30
44428 - 2 2 6 2 2 6 2 2 6 2 2 6
44429 - 2 2 6 66 66 66 58 58 58 22 22 22
44430 - 6 6 6 0 0 0 0 0 0 0 0 0
44431 - 0 0 0 0 0 0 0 0 0 0 0 0
44432 - 0 0 0 0 0 0 0 0 0 0 0 0
44433 - 0 0 0 0 0 0 0 0 0 0 0 0
44434 - 0 0 0 0 0 0 0 0 0 0 0 0
44435 - 0 0 0 0 0 0 0 0 0 0 0 0
44436 - 0 0 0 0 0 0 0 0 0 10 10 10
44437 - 38 38 38 78 78 78 6 6 6 2 2 6
44438 - 2 2 6 46 46 46 14 14 14 42 42 42
44439 -246 246 246 253 253 253 253 253 253 253 253 253
44440 -253 253 253 253 253 253 253 253 253 253 253 253
44441 -253 253 253 253 253 253 231 231 231 242 242 242
44442 -253 253 253 253 253 253 253 253 253 253 253 253
44443 -253 253 253 253 253 253 253 253 253 253 253 253
44444 -253 253 253 253 253 253 253 253 253 253 253 253
44445 -253 253 253 253 253 253 253 253 253 253 253 253
44446 -253 253 253 253 253 253 234 234 234 10 10 10
44447 - 2 2 6 2 2 6 22 22 22 14 14 14
44448 - 2 2 6 2 2 6 2 2 6 2 2 6
44449 - 2 2 6 66 66 66 62 62 62 22 22 22
44450 - 6 6 6 0 0 0 0 0 0 0 0 0
44451 - 0 0 0 0 0 0 0 0 0 0 0 0
44452 - 0 0 0 0 0 0 0 0 0 0 0 0
44453 - 0 0 0 0 0 0 0 0 0 0 0 0
44454 - 0 0 0 0 0 0 0 0 0 0 0 0
44455 - 0 0 0 0 0 0 0 0 0 0 0 0
44456 - 0 0 0 0 0 0 6 6 6 18 18 18
44457 - 50 50 50 74 74 74 2 2 6 2 2 6
44458 - 14 14 14 70 70 70 34 34 34 62 62 62
44459 -250 250 250 253 253 253 253 253 253 253 253 253
44460 -253 253 253 253 253 253 253 253 253 253 253 253
44461 -253 253 253 253 253 253 231 231 231 246 246 246
44462 -253 253 253 253 253 253 253 253 253 253 253 253
44463 -253 253 253 253 253 253 253 253 253 253 253 253
44464 -253 253 253 253 253 253 253 253 253 253 253 253
44465 -253 253 253 253 253 253 253 253 253 253 253 253
44466 -253 253 253 253 253 253 234 234 234 14 14 14
44467 - 2 2 6 2 2 6 30 30 30 2 2 6
44468 - 2 2 6 2 2 6 2 2 6 2 2 6
44469 - 2 2 6 66 66 66 62 62 62 22 22 22
44470 - 6 6 6 0 0 0 0 0 0 0 0 0
44471 - 0 0 0 0 0 0 0 0 0 0 0 0
44472 - 0 0 0 0 0 0 0 0 0 0 0 0
44473 - 0 0 0 0 0 0 0 0 0 0 0 0
44474 - 0 0 0 0 0 0 0 0 0 0 0 0
44475 - 0 0 0 0 0 0 0 0 0 0 0 0
44476 - 0 0 0 0 0 0 6 6 6 18 18 18
44477 - 54 54 54 62 62 62 2 2 6 2 2 6
44478 - 2 2 6 30 30 30 46 46 46 70 70 70
44479 -250 250 250 253 253 253 253 253 253 253 253 253
44480 -253 253 253 253 253 253 253 253 253 253 253 253
44481 -253 253 253 253 253 253 231 231 231 246 246 246
44482 -253 253 253 253 253 253 253 253 253 253 253 253
44483 -253 253 253 253 253 253 253 253 253 253 253 253
44484 -253 253 253 253 253 253 253 253 253 253 253 253
44485 -253 253 253 253 253 253 253 253 253 253 253 253
44486 -253 253 253 253 253 253 226 226 226 10 10 10
44487 - 2 2 6 6 6 6 30 30 30 2 2 6
44488 - 2 2 6 2 2 6 2 2 6 2 2 6
44489 - 2 2 6 66 66 66 58 58 58 22 22 22
44490 - 6 6 6 0 0 0 0 0 0 0 0 0
44491 - 0 0 0 0 0 0 0 0 0 0 0 0
44492 - 0 0 0 0 0 0 0 0 0 0 0 0
44493 - 0 0 0 0 0 0 0 0 0 0 0 0
44494 - 0 0 0 0 0 0 0 0 0 0 0 0
44495 - 0 0 0 0 0 0 0 0 0 0 0 0
44496 - 0 0 0 0 0 0 6 6 6 22 22 22
44497 - 58 58 58 62 62 62 2 2 6 2 2 6
44498 - 2 2 6 2 2 6 30 30 30 78 78 78
44499 -250 250 250 253 253 253 253 253 253 253 253 253
44500 -253 253 253 253 253 253 253 253 253 253 253 253
44501 -253 253 253 253 253 253 231 231 231 246 246 246
44502 -253 253 253 253 253 253 253 253 253 253 253 253
44503 -253 253 253 253 253 253 253 253 253 253 253 253
44504 -253 253 253 253 253 253 253 253 253 253 253 253
44505 -253 253 253 253 253 253 253 253 253 253 253 253
44506 -253 253 253 253 253 253 206 206 206 2 2 6
44507 - 22 22 22 34 34 34 18 14 6 22 22 22
44508 - 26 26 26 18 18 18 6 6 6 2 2 6
44509 - 2 2 6 82 82 82 54 54 54 18 18 18
44510 - 6 6 6 0 0 0 0 0 0 0 0 0
44511 - 0 0 0 0 0 0 0 0 0 0 0 0
44512 - 0 0 0 0 0 0 0 0 0 0 0 0
44513 - 0 0 0 0 0 0 0 0 0 0 0 0
44514 - 0 0 0 0 0 0 0 0 0 0 0 0
44515 - 0 0 0 0 0 0 0 0 0 0 0 0
44516 - 0 0 0 0 0 0 6 6 6 26 26 26
44517 - 62 62 62 106 106 106 74 54 14 185 133 11
44518 -210 162 10 121 92 8 6 6 6 62 62 62
44519 -238 238 238 253 253 253 253 253 253 253 253 253
44520 -253 253 253 253 253 253 253 253 253 253 253 253
44521 -253 253 253 253 253 253 231 231 231 246 246 246
44522 -253 253 253 253 253 253 253 253 253 253 253 253
44523 -253 253 253 253 253 253 253 253 253 253 253 253
44524 -253 253 253 253 253 253 253 253 253 253 253 253
44525 -253 253 253 253 253 253 253 253 253 253 253 253
44526 -253 253 253 253 253 253 158 158 158 18 18 18
44527 - 14 14 14 2 2 6 2 2 6 2 2 6
44528 - 6 6 6 18 18 18 66 66 66 38 38 38
44529 - 6 6 6 94 94 94 50 50 50 18 18 18
44530 - 6 6 6 0 0 0 0 0 0 0 0 0
44531 - 0 0 0 0 0 0 0 0 0 0 0 0
44532 - 0 0 0 0 0 0 0 0 0 0 0 0
44533 - 0 0 0 0 0 0 0 0 0 0 0 0
44534 - 0 0 0 0 0 0 0 0 0 0 0 0
44535 - 0 0 0 0 0 0 0 0 0 6 6 6
44536 - 10 10 10 10 10 10 18 18 18 38 38 38
44537 - 78 78 78 142 134 106 216 158 10 242 186 14
44538 -246 190 14 246 190 14 156 118 10 10 10 10
44539 - 90 90 90 238 238 238 253 253 253 253 253 253
44540 -253 253 253 253 253 253 253 253 253 253 253 253
44541 -253 253 253 253 253 253 231 231 231 250 250 250
44542 -253 253 253 253 253 253 253 253 253 253 253 253
44543 -253 253 253 253 253 253 253 253 253 253 253 253
44544 -253 253 253 253 253 253 253 253 253 253 253 253
44545 -253 253 253 253 253 253 253 253 253 246 230 190
44546 -238 204 91 238 204 91 181 142 44 37 26 9
44547 - 2 2 6 2 2 6 2 2 6 2 2 6
44548 - 2 2 6 2 2 6 38 38 38 46 46 46
44549 - 26 26 26 106 106 106 54 54 54 18 18 18
44550 - 6 6 6 0 0 0 0 0 0 0 0 0
44551 - 0 0 0 0 0 0 0 0 0 0 0 0
44552 - 0 0 0 0 0 0 0 0 0 0 0 0
44553 - 0 0 0 0 0 0 0 0 0 0 0 0
44554 - 0 0 0 0 0 0 0 0 0 0 0 0
44555 - 0 0 0 6 6 6 14 14 14 22 22 22
44556 - 30 30 30 38 38 38 50 50 50 70 70 70
44557 -106 106 106 190 142 34 226 170 11 242 186 14
44558 -246 190 14 246 190 14 246 190 14 154 114 10
44559 - 6 6 6 74 74 74 226 226 226 253 253 253
44560 -253 253 253 253 253 253 253 253 253 253 253 253
44561 -253 253 253 253 253 253 231 231 231 250 250 250
44562 -253 253 253 253 253 253 253 253 253 253 253 253
44563 -253 253 253 253 253 253 253 253 253 253 253 253
44564 -253 253 253 253 253 253 253 253 253 253 253 253
44565 -253 253 253 253 253 253 253 253 253 228 184 62
44566 -241 196 14 241 208 19 232 195 16 38 30 10
44567 - 2 2 6 2 2 6 2 2 6 2 2 6
44568 - 2 2 6 6 6 6 30 30 30 26 26 26
44569 -203 166 17 154 142 90 66 66 66 26 26 26
44570 - 6 6 6 0 0 0 0 0 0 0 0 0
44571 - 0 0 0 0 0 0 0 0 0 0 0 0
44572 - 0 0 0 0 0 0 0 0 0 0 0 0
44573 - 0 0 0 0 0 0 0 0 0 0 0 0
44574 - 0 0 0 0 0 0 0 0 0 0 0 0
44575 - 6 6 6 18 18 18 38 38 38 58 58 58
44576 - 78 78 78 86 86 86 101 101 101 123 123 123
44577 -175 146 61 210 150 10 234 174 13 246 186 14
44578 -246 190 14 246 190 14 246 190 14 238 190 10
44579 -102 78 10 2 2 6 46 46 46 198 198 198
44580 -253 253 253 253 253 253 253 253 253 253 253 253
44581 -253 253 253 253 253 253 234 234 234 242 242 242
44582 -253 253 253 253 253 253 253 253 253 253 253 253
44583 -253 253 253 253 253 253 253 253 253 253 253 253
44584 -253 253 253 253 253 253 253 253 253 253 253 253
44585 -253 253 253 253 253 253 253 253 253 224 178 62
44586 -242 186 14 241 196 14 210 166 10 22 18 6
44587 - 2 2 6 2 2 6 2 2 6 2 2 6
44588 - 2 2 6 2 2 6 6 6 6 121 92 8
44589 -238 202 15 232 195 16 82 82 82 34 34 34
44590 - 10 10 10 0 0 0 0 0 0 0 0 0
44591 - 0 0 0 0 0 0 0 0 0 0 0 0
44592 - 0 0 0 0 0 0 0 0 0 0 0 0
44593 - 0 0 0 0 0 0 0 0 0 0 0 0
44594 - 0 0 0 0 0 0 0 0 0 0 0 0
44595 - 14 14 14 38 38 38 70 70 70 154 122 46
44596 -190 142 34 200 144 11 197 138 11 197 138 11
44597 -213 154 11 226 170 11 242 186 14 246 190 14
44598 -246 190 14 246 190 14 246 190 14 246 190 14
44599 -225 175 15 46 32 6 2 2 6 22 22 22
44600 -158 158 158 250 250 250 253 253 253 253 253 253
44601 -253 253 253 253 253 253 253 253 253 253 253 253
44602 -253 253 253 253 253 253 253 253 253 253 253 253
44603 -253 253 253 253 253 253 253 253 253 253 253 253
44604 -253 253 253 253 253 253 253 253 253 253 253 253
44605 -253 253 253 250 250 250 242 242 242 224 178 62
44606 -239 182 13 236 186 11 213 154 11 46 32 6
44607 - 2 2 6 2 2 6 2 2 6 2 2 6
44608 - 2 2 6 2 2 6 61 42 6 225 175 15
44609 -238 190 10 236 186 11 112 100 78 42 42 42
44610 - 14 14 14 0 0 0 0 0 0 0 0 0
44611 - 0 0 0 0 0 0 0 0 0 0 0 0
44612 - 0 0 0 0 0 0 0 0 0 0 0 0
44613 - 0 0 0 0 0 0 0 0 0 0 0 0
44614 - 0 0 0 0 0 0 0 0 0 6 6 6
44615 - 22 22 22 54 54 54 154 122 46 213 154 11
44616 -226 170 11 230 174 11 226 170 11 226 170 11
44617 -236 178 12 242 186 14 246 190 14 246 190 14
44618 -246 190 14 246 190 14 246 190 14 246 190 14
44619 -241 196 14 184 144 12 10 10 10 2 2 6
44620 - 6 6 6 116 116 116 242 242 242 253 253 253
44621 -253 253 253 253 253 253 253 253 253 253 253 253
44622 -253 253 253 253 253 253 253 253 253 253 253 253
44623 -253 253 253 253 253 253 253 253 253 253 253 253
44624 -253 253 253 253 253 253 253 253 253 253 253 253
44625 -253 253 253 231 231 231 198 198 198 214 170 54
44626 -236 178 12 236 178 12 210 150 10 137 92 6
44627 - 18 14 6 2 2 6 2 2 6 2 2 6
44628 - 6 6 6 70 47 6 200 144 11 236 178 12
44629 -239 182 13 239 182 13 124 112 88 58 58 58
44630 - 22 22 22 6 6 6 0 0 0 0 0 0
44631 - 0 0 0 0 0 0 0 0 0 0 0 0
44632 - 0 0 0 0 0 0 0 0 0 0 0 0
44633 - 0 0 0 0 0 0 0 0 0 0 0 0
44634 - 0 0 0 0 0 0 0 0 0 10 10 10
44635 - 30 30 30 70 70 70 180 133 36 226 170 11
44636 -239 182 13 242 186 14 242 186 14 246 186 14
44637 -246 190 14 246 190 14 246 190 14 246 190 14
44638 -246 190 14 246 190 14 246 190 14 246 190 14
44639 -246 190 14 232 195 16 98 70 6 2 2 6
44640 - 2 2 6 2 2 6 66 66 66 221 221 221
44641 -253 253 253 253 253 253 253 253 253 253 253 253
44642 -253 253 253 253 253 253 253 253 253 253 253 253
44643 -253 253 253 253 253 253 253 253 253 253 253 253
44644 -253 253 253 253 253 253 253 253 253 253 253 253
44645 -253 253 253 206 206 206 198 198 198 214 166 58
44646 -230 174 11 230 174 11 216 158 10 192 133 9
44647 -163 110 8 116 81 8 102 78 10 116 81 8
44648 -167 114 7 197 138 11 226 170 11 239 182 13
44649 -242 186 14 242 186 14 162 146 94 78 78 78
44650 - 34 34 34 14 14 14 6 6 6 0 0 0
44651 - 0 0 0 0 0 0 0 0 0 0 0 0
44652 - 0 0 0 0 0 0 0 0 0 0 0 0
44653 - 0 0 0 0 0 0 0 0 0 0 0 0
44654 - 0 0 0 0 0 0 0 0 0 6 6 6
44655 - 30 30 30 78 78 78 190 142 34 226 170 11
44656 -239 182 13 246 190 14 246 190 14 246 190 14
44657 -246 190 14 246 190 14 246 190 14 246 190 14
44658 -246 190 14 246 190 14 246 190 14 246 190 14
44659 -246 190 14 241 196 14 203 166 17 22 18 6
44660 - 2 2 6 2 2 6 2 2 6 38 38 38
44661 -218 218 218 253 253 253 253 253 253 253 253 253
44662 -253 253 253 253 253 253 253 253 253 253 253 253
44663 -253 253 253 253 253 253 253 253 253 253 253 253
44664 -253 253 253 253 253 253 253 253 253 253 253 253
44665 -250 250 250 206 206 206 198 198 198 202 162 69
44666 -226 170 11 236 178 12 224 166 10 210 150 10
44667 -200 144 11 197 138 11 192 133 9 197 138 11
44668 -210 150 10 226 170 11 242 186 14 246 190 14
44669 -246 190 14 246 186 14 225 175 15 124 112 88
44670 - 62 62 62 30 30 30 14 14 14 6 6 6
44671 - 0 0 0 0 0 0 0 0 0 0 0 0
44672 - 0 0 0 0 0 0 0 0 0 0 0 0
44673 - 0 0 0 0 0 0 0 0 0 0 0 0
44674 - 0 0 0 0 0 0 0 0 0 10 10 10
44675 - 30 30 30 78 78 78 174 135 50 224 166 10
44676 -239 182 13 246 190 14 246 190 14 246 190 14
44677 -246 190 14 246 190 14 246 190 14 246 190 14
44678 -246 190 14 246 190 14 246 190 14 246 190 14
44679 -246 190 14 246 190 14 241 196 14 139 102 15
44680 - 2 2 6 2 2 6 2 2 6 2 2 6
44681 - 78 78 78 250 250 250 253 253 253 253 253 253
44682 -253 253 253 253 253 253 253 253 253 253 253 253
44683 -253 253 253 253 253 253 253 253 253 253 253 253
44684 -253 253 253 253 253 253 253 253 253 253 253 253
44685 -250 250 250 214 214 214 198 198 198 190 150 46
44686 -219 162 10 236 178 12 234 174 13 224 166 10
44687 -216 158 10 213 154 11 213 154 11 216 158 10
44688 -226 170 11 239 182 13 246 190 14 246 190 14
44689 -246 190 14 246 190 14 242 186 14 206 162 42
44690 -101 101 101 58 58 58 30 30 30 14 14 14
44691 - 6 6 6 0 0 0 0 0 0 0 0 0
44692 - 0 0 0 0 0 0 0 0 0 0 0 0
44693 - 0 0 0 0 0 0 0 0 0 0 0 0
44694 - 0 0 0 0 0 0 0 0 0 10 10 10
44695 - 30 30 30 74 74 74 174 135 50 216 158 10
44696 -236 178 12 246 190 14 246 190 14 246 190 14
44697 -246 190 14 246 190 14 246 190 14 246 190 14
44698 -246 190 14 246 190 14 246 190 14 246 190 14
44699 -246 190 14 246 190 14 241 196 14 226 184 13
44700 - 61 42 6 2 2 6 2 2 6 2 2 6
44701 - 22 22 22 238 238 238 253 253 253 253 253 253
44702 -253 253 253 253 253 253 253 253 253 253 253 253
44703 -253 253 253 253 253 253 253 253 253 253 253 253
44704 -253 253 253 253 253 253 253 253 253 253 253 253
44705 -253 253 253 226 226 226 187 187 187 180 133 36
44706 -216 158 10 236 178 12 239 182 13 236 178 12
44707 -230 174 11 226 170 11 226 170 11 230 174 11
44708 -236 178 12 242 186 14 246 190 14 246 190 14
44709 -246 190 14 246 190 14 246 186 14 239 182 13
44710 -206 162 42 106 106 106 66 66 66 34 34 34
44711 - 14 14 14 6 6 6 0 0 0 0 0 0
44712 - 0 0 0 0 0 0 0 0 0 0 0 0
44713 - 0 0 0 0 0 0 0 0 0 0 0 0
44714 - 0 0 0 0 0 0 0 0 0 6 6 6
44715 - 26 26 26 70 70 70 163 133 67 213 154 11
44716 -236 178 12 246 190 14 246 190 14 246 190 14
44717 -246 190 14 246 190 14 246 190 14 246 190 14
44718 -246 190 14 246 190 14 246 190 14 246 190 14
44719 -246 190 14 246 190 14 246 190 14 241 196 14
44720 -190 146 13 18 14 6 2 2 6 2 2 6
44721 - 46 46 46 246 246 246 253 253 253 253 253 253
44722 -253 253 253 253 253 253 253 253 253 253 253 253
44723 -253 253 253 253 253 253 253 253 253 253 253 253
44724 -253 253 253 253 253 253 253 253 253 253 253 253
44725 -253 253 253 221 221 221 86 86 86 156 107 11
44726 -216 158 10 236 178 12 242 186 14 246 186 14
44727 -242 186 14 239 182 13 239 182 13 242 186 14
44728 -242 186 14 246 186 14 246 190 14 246 190 14
44729 -246 190 14 246 190 14 246 190 14 246 190 14
44730 -242 186 14 225 175 15 142 122 72 66 66 66
44731 - 30 30 30 10 10 10 0 0 0 0 0 0
44732 - 0 0 0 0 0 0 0 0 0 0 0 0
44733 - 0 0 0 0 0 0 0 0 0 0 0 0
44734 - 0 0 0 0 0 0 0 0 0 6 6 6
44735 - 26 26 26 70 70 70 163 133 67 210 150 10
44736 -236 178 12 246 190 14 246 190 14 246 190 14
44737 -246 190 14 246 190 14 246 190 14 246 190 14
44738 -246 190 14 246 190 14 246 190 14 246 190 14
44739 -246 190 14 246 190 14 246 190 14 246 190 14
44740 -232 195 16 121 92 8 34 34 34 106 106 106
44741 -221 221 221 253 253 253 253 253 253 253 253 253
44742 -253 253 253 253 253 253 253 253 253 253 253 253
44743 -253 253 253 253 253 253 253 253 253 253 253 253
44744 -253 253 253 253 253 253 253 253 253 253 253 253
44745 -242 242 242 82 82 82 18 14 6 163 110 8
44746 -216 158 10 236 178 12 242 186 14 246 190 14
44747 -246 190 14 246 190 14 246 190 14 246 190 14
44748 -246 190 14 246 190 14 246 190 14 246 190 14
44749 -246 190 14 246 190 14 246 190 14 246 190 14
44750 -246 190 14 246 190 14 242 186 14 163 133 67
44751 - 46 46 46 18 18 18 6 6 6 0 0 0
44752 - 0 0 0 0 0 0 0 0 0 0 0 0
44753 - 0 0 0 0 0 0 0 0 0 0 0 0
44754 - 0 0 0 0 0 0 0 0 0 10 10 10
44755 - 30 30 30 78 78 78 163 133 67 210 150 10
44756 -236 178 12 246 186 14 246 190 14 246 190 14
44757 -246 190 14 246 190 14 246 190 14 246 190 14
44758 -246 190 14 246 190 14 246 190 14 246 190 14
44759 -246 190 14 246 190 14 246 190 14 246 190 14
44760 -241 196 14 215 174 15 190 178 144 253 253 253
44761 -253 253 253 253 253 253 253 253 253 253 253 253
44762 -253 253 253 253 253 253 253 253 253 253 253 253
44763 -253 253 253 253 253 253 253 253 253 253 253 253
44764 -253 253 253 253 253 253 253 253 253 218 218 218
44765 - 58 58 58 2 2 6 22 18 6 167 114 7
44766 -216 158 10 236 178 12 246 186 14 246 190 14
44767 -246 190 14 246 190 14 246 190 14 246 190 14
44768 -246 190 14 246 190 14 246 190 14 246 190 14
44769 -246 190 14 246 190 14 246 190 14 246 190 14
44770 -246 190 14 246 186 14 242 186 14 190 150 46
44771 - 54 54 54 22 22 22 6 6 6 0 0 0
44772 - 0 0 0 0 0 0 0 0 0 0 0 0
44773 - 0 0 0 0 0 0 0 0 0 0 0 0
44774 - 0 0 0 0 0 0 0 0 0 14 14 14
44775 - 38 38 38 86 86 86 180 133 36 213 154 11
44776 -236 178 12 246 186 14 246 190 14 246 190 14
44777 -246 190 14 246 190 14 246 190 14 246 190 14
44778 -246 190 14 246 190 14 246 190 14 246 190 14
44779 -246 190 14 246 190 14 246 190 14 246 190 14
44780 -246 190 14 232 195 16 190 146 13 214 214 214
44781 -253 253 253 253 253 253 253 253 253 253 253 253
44782 -253 253 253 253 253 253 253 253 253 253 253 253
44783 -253 253 253 253 253 253 253 253 253 253 253 253
44784 -253 253 253 250 250 250 170 170 170 26 26 26
44785 - 2 2 6 2 2 6 37 26 9 163 110 8
44786 -219 162 10 239 182 13 246 186 14 246 190 14
44787 -246 190 14 246 190 14 246 190 14 246 190 14
44788 -246 190 14 246 190 14 246 190 14 246 190 14
44789 -246 190 14 246 190 14 246 190 14 246 190 14
44790 -246 186 14 236 178 12 224 166 10 142 122 72
44791 - 46 46 46 18 18 18 6 6 6 0 0 0
44792 - 0 0 0 0 0 0 0 0 0 0 0 0
44793 - 0 0 0 0 0 0 0 0 0 0 0 0
44794 - 0 0 0 0 0 0 6 6 6 18 18 18
44795 - 50 50 50 109 106 95 192 133 9 224 166 10
44796 -242 186 14 246 190 14 246 190 14 246 190 14
44797 -246 190 14 246 190 14 246 190 14 246 190 14
44798 -246 190 14 246 190 14 246 190 14 246 190 14
44799 -246 190 14 246 190 14 246 190 14 246 190 14
44800 -242 186 14 226 184 13 210 162 10 142 110 46
44801 -226 226 226 253 253 253 253 253 253 253 253 253
44802 -253 253 253 253 253 253 253 253 253 253 253 253
44803 -253 253 253 253 253 253 253 253 253 253 253 253
44804 -198 198 198 66 66 66 2 2 6 2 2 6
44805 - 2 2 6 2 2 6 50 34 6 156 107 11
44806 -219 162 10 239 182 13 246 186 14 246 190 14
44807 -246 190 14 246 190 14 246 190 14 246 190 14
44808 -246 190 14 246 190 14 246 190 14 246 190 14
44809 -246 190 14 246 190 14 246 190 14 242 186 14
44810 -234 174 13 213 154 11 154 122 46 66 66 66
44811 - 30 30 30 10 10 10 0 0 0 0 0 0
44812 - 0 0 0 0 0 0 0 0 0 0 0 0
44813 - 0 0 0 0 0 0 0 0 0 0 0 0
44814 - 0 0 0 0 0 0 6 6 6 22 22 22
44815 - 58 58 58 154 121 60 206 145 10 234 174 13
44816 -242 186 14 246 186 14 246 190 14 246 190 14
44817 -246 190 14 246 190 14 246 190 14 246 190 14
44818 -246 190 14 246 190 14 246 190 14 246 190 14
44819 -246 190 14 246 190 14 246 190 14 246 190 14
44820 -246 186 14 236 178 12 210 162 10 163 110 8
44821 - 61 42 6 138 138 138 218 218 218 250 250 250
44822 -253 253 253 253 253 253 253 253 253 250 250 250
44823 -242 242 242 210 210 210 144 144 144 66 66 66
44824 - 6 6 6 2 2 6 2 2 6 2 2 6
44825 - 2 2 6 2 2 6 61 42 6 163 110 8
44826 -216 158 10 236 178 12 246 190 14 246 190 14
44827 -246 190 14 246 190 14 246 190 14 246 190 14
44828 -246 190 14 246 190 14 246 190 14 246 190 14
44829 -246 190 14 239 182 13 230 174 11 216 158 10
44830 -190 142 34 124 112 88 70 70 70 38 38 38
44831 - 18 18 18 6 6 6 0 0 0 0 0 0
44832 - 0 0 0 0 0 0 0 0 0 0 0 0
44833 - 0 0 0 0 0 0 0 0 0 0 0 0
44834 - 0 0 0 0 0 0 6 6 6 22 22 22
44835 - 62 62 62 168 124 44 206 145 10 224 166 10
44836 -236 178 12 239 182 13 242 186 14 242 186 14
44837 -246 186 14 246 190 14 246 190 14 246 190 14
44838 -246 190 14 246 190 14 246 190 14 246 190 14
44839 -246 190 14 246 190 14 246 190 14 246 190 14
44840 -246 190 14 236 178 12 216 158 10 175 118 6
44841 - 80 54 7 2 2 6 6 6 6 30 30 30
44842 - 54 54 54 62 62 62 50 50 50 38 38 38
44843 - 14 14 14 2 2 6 2 2 6 2 2 6
44844 - 2 2 6 2 2 6 2 2 6 2 2 6
44845 - 2 2 6 6 6 6 80 54 7 167 114 7
44846 -213 154 11 236 178 12 246 190 14 246 190 14
44847 -246 190 14 246 190 14 246 190 14 246 190 14
44848 -246 190 14 242 186 14 239 182 13 239 182 13
44849 -230 174 11 210 150 10 174 135 50 124 112 88
44850 - 82 82 82 54 54 54 34 34 34 18 18 18
44851 - 6 6 6 0 0 0 0 0 0 0 0 0
44852 - 0 0 0 0 0 0 0 0 0 0 0 0
44853 - 0 0 0 0 0 0 0 0 0 0 0 0
44854 - 0 0 0 0 0 0 6 6 6 18 18 18
44855 - 50 50 50 158 118 36 192 133 9 200 144 11
44856 -216 158 10 219 162 10 224 166 10 226 170 11
44857 -230 174 11 236 178 12 239 182 13 239 182 13
44858 -242 186 14 246 186 14 246 190 14 246 190 14
44859 -246 190 14 246 190 14 246 190 14 246 190 14
44860 -246 186 14 230 174 11 210 150 10 163 110 8
44861 -104 69 6 10 10 10 2 2 6 2 2 6
44862 - 2 2 6 2 2 6 2 2 6 2 2 6
44863 - 2 2 6 2 2 6 2 2 6 2 2 6
44864 - 2 2 6 2 2 6 2 2 6 2 2 6
44865 - 2 2 6 6 6 6 91 60 6 167 114 7
44866 -206 145 10 230 174 11 242 186 14 246 190 14
44867 -246 190 14 246 190 14 246 186 14 242 186 14
44868 -239 182 13 230 174 11 224 166 10 213 154 11
44869 -180 133 36 124 112 88 86 86 86 58 58 58
44870 - 38 38 38 22 22 22 10 10 10 6 6 6
44871 - 0 0 0 0 0 0 0 0 0 0 0 0
44872 - 0 0 0 0 0 0 0 0 0 0 0 0
44873 - 0 0 0 0 0 0 0 0 0 0 0 0
44874 - 0 0 0 0 0 0 0 0 0 14 14 14
44875 - 34 34 34 70 70 70 138 110 50 158 118 36
44876 -167 114 7 180 123 7 192 133 9 197 138 11
44877 -200 144 11 206 145 10 213 154 11 219 162 10
44878 -224 166 10 230 174 11 239 182 13 242 186 14
44879 -246 186 14 246 186 14 246 186 14 246 186 14
44880 -239 182 13 216 158 10 185 133 11 152 99 6
44881 -104 69 6 18 14 6 2 2 6 2 2 6
44882 - 2 2 6 2 2 6 2 2 6 2 2 6
44883 - 2 2 6 2 2 6 2 2 6 2 2 6
44884 - 2 2 6 2 2 6 2 2 6 2 2 6
44885 - 2 2 6 6 6 6 80 54 7 152 99 6
44886 -192 133 9 219 162 10 236 178 12 239 182 13
44887 -246 186 14 242 186 14 239 182 13 236 178 12
44888 -224 166 10 206 145 10 192 133 9 154 121 60
44889 - 94 94 94 62 62 62 42 42 42 22 22 22
44890 - 14 14 14 6 6 6 0 0 0 0 0 0
44891 - 0 0 0 0 0 0 0 0 0 0 0 0
44892 - 0 0 0 0 0 0 0 0 0 0 0 0
44893 - 0 0 0 0 0 0 0 0 0 0 0 0
44894 - 0 0 0 0 0 0 0 0 0 6 6 6
44895 - 18 18 18 34 34 34 58 58 58 78 78 78
44896 -101 98 89 124 112 88 142 110 46 156 107 11
44897 -163 110 8 167 114 7 175 118 6 180 123 7
44898 -185 133 11 197 138 11 210 150 10 219 162 10
44899 -226 170 11 236 178 12 236 178 12 234 174 13
44900 -219 162 10 197 138 11 163 110 8 130 83 6
44901 - 91 60 6 10 10 10 2 2 6 2 2 6
44902 - 18 18 18 38 38 38 38 38 38 38 38 38
44903 - 38 38 38 38 38 38 38 38 38 38 38 38
44904 - 38 38 38 38 38 38 26 26 26 2 2 6
44905 - 2 2 6 6 6 6 70 47 6 137 92 6
44906 -175 118 6 200 144 11 219 162 10 230 174 11
44907 -234 174 13 230 174 11 219 162 10 210 150 10
44908 -192 133 9 163 110 8 124 112 88 82 82 82
44909 - 50 50 50 30 30 30 14 14 14 6 6 6
44910 - 0 0 0 0 0 0 0 0 0 0 0 0
44911 - 0 0 0 0 0 0 0 0 0 0 0 0
44912 - 0 0 0 0 0 0 0 0 0 0 0 0
44913 - 0 0 0 0 0 0 0 0 0 0 0 0
44914 - 0 0 0 0 0 0 0 0 0 0 0 0
44915 - 6 6 6 14 14 14 22 22 22 34 34 34
44916 - 42 42 42 58 58 58 74 74 74 86 86 86
44917 -101 98 89 122 102 70 130 98 46 121 87 25
44918 -137 92 6 152 99 6 163 110 8 180 123 7
44919 -185 133 11 197 138 11 206 145 10 200 144 11
44920 -180 123 7 156 107 11 130 83 6 104 69 6
44921 - 50 34 6 54 54 54 110 110 110 101 98 89
44922 - 86 86 86 82 82 82 78 78 78 78 78 78
44923 - 78 78 78 78 78 78 78 78 78 78 78 78
44924 - 78 78 78 82 82 82 86 86 86 94 94 94
44925 -106 106 106 101 101 101 86 66 34 124 80 6
44926 -156 107 11 180 123 7 192 133 9 200 144 11
44927 -206 145 10 200 144 11 192 133 9 175 118 6
44928 -139 102 15 109 106 95 70 70 70 42 42 42
44929 - 22 22 22 10 10 10 0 0 0 0 0 0
44930 - 0 0 0 0 0 0 0 0 0 0 0 0
44931 - 0 0 0 0 0 0 0 0 0 0 0 0
44932 - 0 0 0 0 0 0 0 0 0 0 0 0
44933 - 0 0 0 0 0 0 0 0 0 0 0 0
44934 - 0 0 0 0 0 0 0 0 0 0 0 0
44935 - 0 0 0 0 0 0 6 6 6 10 10 10
44936 - 14 14 14 22 22 22 30 30 30 38 38 38
44937 - 50 50 50 62 62 62 74 74 74 90 90 90
44938 -101 98 89 112 100 78 121 87 25 124 80 6
44939 -137 92 6 152 99 6 152 99 6 152 99 6
44940 -138 86 6 124 80 6 98 70 6 86 66 30
44941 -101 98 89 82 82 82 58 58 58 46 46 46
44942 - 38 38 38 34 34 34 34 34 34 34 34 34
44943 - 34 34 34 34 34 34 34 34 34 34 34 34
44944 - 34 34 34 34 34 34 38 38 38 42 42 42
44945 - 54 54 54 82 82 82 94 86 76 91 60 6
44946 -134 86 6 156 107 11 167 114 7 175 118 6
44947 -175 118 6 167 114 7 152 99 6 121 87 25
44948 -101 98 89 62 62 62 34 34 34 18 18 18
44949 - 6 6 6 0 0 0 0 0 0 0 0 0
44950 - 0 0 0 0 0 0 0 0 0 0 0 0
44951 - 0 0 0 0 0 0 0 0 0 0 0 0
44952 - 0 0 0 0 0 0 0 0 0 0 0 0
44953 - 0 0 0 0 0 0 0 0 0 0 0 0
44954 - 0 0 0 0 0 0 0 0 0 0 0 0
44955 - 0 0 0 0 0 0 0 0 0 0 0 0
44956 - 0 0 0 6 6 6 6 6 6 10 10 10
44957 - 18 18 18 22 22 22 30 30 30 42 42 42
44958 - 50 50 50 66 66 66 86 86 86 101 98 89
44959 -106 86 58 98 70 6 104 69 6 104 69 6
44960 -104 69 6 91 60 6 82 62 34 90 90 90
44961 - 62 62 62 38 38 38 22 22 22 14 14 14
44962 - 10 10 10 10 10 10 10 10 10 10 10 10
44963 - 10 10 10 10 10 10 6 6 6 10 10 10
44964 - 10 10 10 10 10 10 10 10 10 14 14 14
44965 - 22 22 22 42 42 42 70 70 70 89 81 66
44966 - 80 54 7 104 69 6 124 80 6 137 92 6
44967 -134 86 6 116 81 8 100 82 52 86 86 86
44968 - 58 58 58 30 30 30 14 14 14 6 6 6
44969 - 0 0 0 0 0 0 0 0 0 0 0 0
44970 - 0 0 0 0 0 0 0 0 0 0 0 0
44971 - 0 0 0 0 0 0 0 0 0 0 0 0
44972 - 0 0 0 0 0 0 0 0 0 0 0 0
44973 - 0 0 0 0 0 0 0 0 0 0 0 0
44974 - 0 0 0 0 0 0 0 0 0 0 0 0
44975 - 0 0 0 0 0 0 0 0 0 0 0 0
44976 - 0 0 0 0 0 0 0 0 0 0 0 0
44977 - 0 0 0 6 6 6 10 10 10 14 14 14
44978 - 18 18 18 26 26 26 38 38 38 54 54 54
44979 - 70 70 70 86 86 86 94 86 76 89 81 66
44980 - 89 81 66 86 86 86 74 74 74 50 50 50
44981 - 30 30 30 14 14 14 6 6 6 0 0 0
44982 - 0 0 0 0 0 0 0 0 0 0 0 0
44983 - 0 0 0 0 0 0 0 0 0 0 0 0
44984 - 0 0 0 0 0 0 0 0 0 0 0 0
44985 - 6 6 6 18 18 18 34 34 34 58 58 58
44986 - 82 82 82 89 81 66 89 81 66 89 81 66
44987 - 94 86 66 94 86 76 74 74 74 50 50 50
44988 - 26 26 26 14 14 14 6 6 6 0 0 0
44989 - 0 0 0 0 0 0 0 0 0 0 0 0
44990 - 0 0 0 0 0 0 0 0 0 0 0 0
44991 - 0 0 0 0 0 0 0 0 0 0 0 0
44992 - 0 0 0 0 0 0 0 0 0 0 0 0
44993 - 0 0 0 0 0 0 0 0 0 0 0 0
44994 - 0 0 0 0 0 0 0 0 0 0 0 0
44995 - 0 0 0 0 0 0 0 0 0 0 0 0
44996 - 0 0 0 0 0 0 0 0 0 0 0 0
44997 - 0 0 0 0 0 0 0 0 0 0 0 0
44998 - 6 6 6 6 6 6 14 14 14 18 18 18
44999 - 30 30 30 38 38 38 46 46 46 54 54 54
45000 - 50 50 50 42 42 42 30 30 30 18 18 18
45001 - 10 10 10 0 0 0 0 0 0 0 0 0
45002 - 0 0 0 0 0 0 0 0 0 0 0 0
45003 - 0 0 0 0 0 0 0 0 0 0 0 0
45004 - 0 0 0 0 0 0 0 0 0 0 0 0
45005 - 0 0 0 6 6 6 14 14 14 26 26 26
45006 - 38 38 38 50 50 50 58 58 58 58 58 58
45007 - 54 54 54 42 42 42 30 30 30 18 18 18
45008 - 10 10 10 0 0 0 0 0 0 0 0 0
45009 - 0 0 0 0 0 0 0 0 0 0 0 0
45010 - 0 0 0 0 0 0 0 0 0 0 0 0
45011 - 0 0 0 0 0 0 0 0 0 0 0 0
45012 - 0 0 0 0 0 0 0 0 0 0 0 0
45013 - 0 0 0 0 0 0 0 0 0 0 0 0
45014 - 0 0 0 0 0 0 0 0 0 0 0 0
45015 - 0 0 0 0 0 0 0 0 0 0 0 0
45016 - 0 0 0 0 0 0 0 0 0 0 0 0
45017 - 0 0 0 0 0 0 0 0 0 0 0 0
45018 - 0 0 0 0 0 0 0 0 0 6 6 6
45019 - 6 6 6 10 10 10 14 14 14 18 18 18
45020 - 18 18 18 14 14 14 10 10 10 6 6 6
45021 - 0 0 0 0 0 0 0 0 0 0 0 0
45022 - 0 0 0 0 0 0 0 0 0 0 0 0
45023 - 0 0 0 0 0 0 0 0 0 0 0 0
45024 - 0 0 0 0 0 0 0 0 0 0 0 0
45025 - 0 0 0 0 0 0 0 0 0 6 6 6
45026 - 14 14 14 18 18 18 22 22 22 22 22 22
45027 - 18 18 18 14 14 14 10 10 10 6 6 6
45028 - 0 0 0 0 0 0 0 0 0 0 0 0
45029 - 0 0 0 0 0 0 0 0 0 0 0 0
45030 - 0 0 0 0 0 0 0 0 0 0 0 0
45031 - 0 0 0 0 0 0 0 0 0 0 0 0
45032 - 0 0 0 0 0 0 0 0 0 0 0 0
45033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45046 +4 4 4 4 4 4
45047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45060 +4 4 4 4 4 4
45061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45074 +4 4 4 4 4 4
45075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45088 +4 4 4 4 4 4
45089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45102 +4 4 4 4 4 4
45103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45116 +4 4 4 4 4 4
45117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45121 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45122 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45126 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45127 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45128 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45130 +4 4 4 4 4 4
45131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45135 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45136 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45137 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45140 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45141 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45142 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45143 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45144 +4 4 4 4 4 4
45145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45149 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45150 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45151 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45154 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45155 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45156 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45157 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45158 +4 4 4 4 4 4
45159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45162 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45163 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45164 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45165 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45167 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45168 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45169 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45170 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45171 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45172 +4 4 4 4 4 4
45173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45176 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45177 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45178 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45179 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45180 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45181 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45182 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45183 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45184 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45185 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45186 +4 4 4 4 4 4
45187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45190 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45191 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45192 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45193 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45194 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45195 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45196 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45197 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45198 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45199 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45200 +4 4 4 4 4 4
45201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45203 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45204 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45205 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45206 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45207 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45208 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45209 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45210 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45211 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45212 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45213 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45214 +4 4 4 4 4 4
45215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45217 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45218 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45219 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45220 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45221 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45222 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45223 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45224 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45225 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45226 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45227 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45228 +4 4 4 4 4 4
45229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45231 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45232 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45233 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45234 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45235 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45236 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45237 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45238 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45239 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45240 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45241 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45242 +4 4 4 4 4 4
45243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45245 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45246 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45247 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45248 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45249 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45250 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45251 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45252 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45253 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45254 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45255 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45256 +4 4 4 4 4 4
45257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45258 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45259 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45260 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45261 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45262 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45263 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45264 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45265 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45266 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45267 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45268 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45269 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45270 +4 4 4 4 4 4
45271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45272 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45273 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45274 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45275 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45276 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45277 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45278 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45279 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45280 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45281 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45282 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45283 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45284 +0 0 0 4 4 4
45285 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45286 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45287 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45288 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45289 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45290 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45291 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45292 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45293 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45294 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45295 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45296 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45297 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45298 +2 0 0 0 0 0
45299 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45300 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45301 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45302 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45303 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45304 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45305 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45306 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45307 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45308 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45309 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45310 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45311 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45312 +37 38 37 0 0 0
45313 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45314 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45315 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45316 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45317 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45318 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45319 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45320 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45321 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45322 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45323 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45324 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45325 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45326 +85 115 134 4 0 0
45327 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45328 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45329 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45330 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45331 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45332 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45333 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45334 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45335 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45336 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45337 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45338 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45339 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45340 +60 73 81 4 0 0
45341 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45342 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45343 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45344 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45345 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45346 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45347 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45348 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45349 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45350 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45351 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45352 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45353 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45354 +16 19 21 4 0 0
45355 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45356 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45357 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45358 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45359 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45360 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45361 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45362 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45363 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45364 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45365 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45366 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45367 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45368 +4 0 0 4 3 3
45369 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45370 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45371 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45373 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45374 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45375 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45376 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45377 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45378 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45379 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45380 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45381 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45382 +3 2 2 4 4 4
45383 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45384 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45385 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45386 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45387 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45388 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45389 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45390 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45391 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45392 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45393 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45394 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45395 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45396 +4 4 4 4 4 4
45397 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45398 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45399 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45400 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45401 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45402 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45403 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45404 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45405 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45406 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45407 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45408 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45409 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45410 +4 4 4 4 4 4
45411 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45412 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45413 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45414 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45415 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45416 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45417 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45418 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45419 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45420 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45421 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45422 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45423 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45424 +5 5 5 5 5 5
45425 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45426 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45427 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45428 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45429 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45430 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45431 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45432 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45433 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45434 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45435 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45436 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45437 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45438 +5 5 5 4 4 4
45439 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45440 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45441 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45442 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45443 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45444 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45445 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45446 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45447 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45448 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45449 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45450 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45452 +4 4 4 4 4 4
45453 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45454 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45455 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45456 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45457 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45458 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45459 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45460 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45461 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45462 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45463 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45464 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45466 +4 4 4 4 4 4
45467 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45468 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45469 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45470 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45471 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45472 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45473 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45474 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45475 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45476 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45477 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45480 +4 4 4 4 4 4
45481 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45482 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45483 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45484 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45485 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45486 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45487 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45488 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45489 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45490 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45491 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494 +4 4 4 4 4 4
45495 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45496 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45497 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45498 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45499 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45500 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45501 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45502 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45503 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45504 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45505 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508 +4 4 4 4 4 4
45509 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45510 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45511 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45512 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45513 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45514 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45515 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45516 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45517 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45518 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45519 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522 +4 4 4 4 4 4
45523 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45524 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45525 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45526 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45527 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45528 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45529 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45530 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45531 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45532 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45533 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536 +4 4 4 4 4 4
45537 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45538 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45539 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45540 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45541 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45542 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45543 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45544 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45545 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45546 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45547 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550 +4 4 4 4 4 4
45551 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45552 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45553 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45554 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45555 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45556 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45557 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45558 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45559 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45560 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45561 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564 +4 4 4 4 4 4
45565 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45566 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45567 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45568 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45569 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45570 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45571 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45572 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45573 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45574 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45575 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578 +4 4 4 4 4 4
45579 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45580 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45581 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45582 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45583 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45584 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45585 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45586 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45587 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45588 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45589 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592 +4 4 4 4 4 4
45593 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45594 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45595 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45596 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45597 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45598 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45599 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45600 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45601 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45602 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45603 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606 +4 4 4 4 4 4
45607 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45608 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45609 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45610 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45611 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45612 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45613 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45614 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45615 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45616 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45617 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620 +4 4 4 4 4 4
45621 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45622 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45623 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45624 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45625 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45626 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45627 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45628 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45629 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45630 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45631 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634 +4 4 4 4 4 4
45635 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45636 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45637 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45638 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45639 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45640 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45641 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45642 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45643 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45644 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45645 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648 +4 4 4 4 4 4
45649 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45650 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45651 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45652 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45653 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45654 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45655 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45656 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45657 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45658 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45659 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662 +4 4 4 4 4 4
45663 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45664 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45665 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45666 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45667 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45668 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45669 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45670 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45671 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45672 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45673 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676 +4 4 4 4 4 4
45677 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45678 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45679 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45680 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45681 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45682 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45683 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45684 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45685 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45686 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45687 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690 +4 4 4 4 4 4
45691 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45692 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45693 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45694 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45695 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45696 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45697 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45698 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45699 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45700 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45701 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704 +4 4 4 4 4 4
45705 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45706 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45707 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45708 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45709 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45710 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45711 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45712 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45713 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45714 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45715 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718 +4 4 4 4 4 4
45719 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45720 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45721 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45722 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45723 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45724 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45725 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45726 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45727 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45728 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45729 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732 +4 4 4 4 4 4
45733 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45734 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45735 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45736 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45737 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45738 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45739 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45740 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45741 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45742 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45743 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746 +4 4 4 4 4 4
45747 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45748 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45749 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45750 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45751 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45752 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45753 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45754 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45755 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45756 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45757 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760 +4 4 4 4 4 4
45761 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45762 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45763 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45764 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45765 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45766 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45767 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45768 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45769 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45770 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45771 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774 +4 4 4 4 4 4
45775 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45776 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45777 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45778 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45779 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45780 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45781 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45782 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45783 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45784 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45785 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788 +4 4 4 4 4 4
45789 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45790 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45791 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45792 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45793 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45794 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45795 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45796 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45797 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45798 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45799 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802 +4 4 4 4 4 4
45803 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45804 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45805 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45806 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45807 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45808 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45809 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45810 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45811 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45812 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45813 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45816 +4 4 4 4 4 4
45817 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45818 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45819 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45820 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45821 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45822 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45823 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45824 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45825 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45826 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45827 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45830 +4 4 4 4 4 4
45831 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45832 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45833 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45834 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45835 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45836 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45837 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45838 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45839 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45840 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45841 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45844 +4 4 4 4 4 4
45845 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45846 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45847 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45848 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45849 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45850 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45851 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45852 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45853 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45854 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45858 +4 4 4 4 4 4
45859 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45860 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45861 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45862 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45863 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45864 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45865 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45866 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45867 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45868 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45872 +4 4 4 4 4 4
45873 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45874 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45875 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45876 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45877 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45878 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45879 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45880 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45881 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45882 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45886 +4 4 4 4 4 4
45887 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45888 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45889 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45890 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45891 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45892 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45893 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45894 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45895 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45896 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45900 +4 4 4 4 4 4
45901 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45902 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45903 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45904 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45905 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45906 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45907 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45908 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45909 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45914 +4 4 4 4 4 4
45915 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45916 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45917 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45918 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45919 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45920 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45921 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45922 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45923 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45928 +4 4 4 4 4 4
45929 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45930 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45931 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45932 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45933 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45934 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45935 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45936 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45937 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942 +4 4 4 4 4 4
45943 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45944 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45945 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45946 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45947 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45948 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45949 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45950 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956 +4 4 4 4 4 4
45957 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45958 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45959 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45960 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45961 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45962 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45963 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45964 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970 +4 4 4 4 4 4
45971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45972 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45973 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45974 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45975 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45976 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45977 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45978 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984 +4 4 4 4 4 4
45985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45987 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45988 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45989 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45990 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45991 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45992 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998 +4 4 4 4 4 4
45999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
46001 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
46002 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46003 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
46004 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
46005 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
46006 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012 +4 4 4 4 4 4
46013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46015 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46016 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
46017 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
46018 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
46019 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
46020 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46026 +4 4 4 4 4 4
46027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46030 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46031 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46032 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46033 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46040 +4 4 4 4 4 4
46041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46045 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46046 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46047 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46054 +4 4 4 4 4 4
46055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46059 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46060 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46061 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46068 +4 4 4 4 4 4
46069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46073 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46074 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46075 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46082 +4 4 4 4 4 4
46083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46087 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46088 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46089 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46096 +4 4 4 4 4 4
46097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46101 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46102 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46103 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46110 +4 4 4 4 4 4
46111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46115 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46116 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46117 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46124 +4 4 4 4 4 4
46125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46129 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46130 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46138 +4 4 4 4 4 4
46139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46143 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46144 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46152 +4 4 4 4 4 4
46153 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46154 index 443e3c8..c443d6a 100644
46155 --- a/drivers/video/nvidia/nv_backlight.c
46156 +++ b/drivers/video/nvidia/nv_backlight.c
46157 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46158 return bd->props.brightness;
46159 }
46160
46161 -static struct backlight_ops nvidia_bl_ops = {
46162 +static const struct backlight_ops nvidia_bl_ops = {
46163 .get_brightness = nvidia_bl_get_brightness,
46164 .update_status = nvidia_bl_update_status,
46165 };
46166 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46167 index d94c57f..912984c 100644
46168 --- a/drivers/video/riva/fbdev.c
46169 +++ b/drivers/video/riva/fbdev.c
46170 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46171 return bd->props.brightness;
46172 }
46173
46174 -static struct backlight_ops riva_bl_ops = {
46175 +static const struct backlight_ops riva_bl_ops = {
46176 .get_brightness = riva_bl_get_brightness,
46177 .update_status = riva_bl_update_status,
46178 };
46179 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46180 index 54fbb29..2c108fc 100644
46181 --- a/drivers/video/uvesafb.c
46182 +++ b/drivers/video/uvesafb.c
46183 @@ -18,6 +18,7 @@
46184 #include <linux/fb.h>
46185 #include <linux/io.h>
46186 #include <linux/mutex.h>
46187 +#include <linux/moduleloader.h>
46188 #include <video/edid.h>
46189 #include <video/uvesafb.h>
46190 #ifdef CONFIG_X86
46191 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46192 NULL,
46193 };
46194
46195 - return call_usermodehelper(v86d_path, argv, envp, 1);
46196 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46197 }
46198
46199 /*
46200 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46201 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46202 par->pmi_setpal = par->ypan = 0;
46203 } else {
46204 +
46205 +#ifdef CONFIG_PAX_KERNEXEC
46206 +#ifdef CONFIG_MODULES
46207 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46208 +#endif
46209 + if (!par->pmi_code) {
46210 + par->pmi_setpal = par->ypan = 0;
46211 + return 0;
46212 + }
46213 +#endif
46214 +
46215 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46216 + task->t.regs.edi);
46217 +
46218 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46219 + pax_open_kernel();
46220 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46221 + pax_close_kernel();
46222 +
46223 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46224 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46225 +#else
46226 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46227 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46228 +#endif
46229 +
46230 printk(KERN_INFO "uvesafb: protected mode interface info at "
46231 "%04x:%04x\n",
46232 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46233 @@ -1799,6 +1822,11 @@ out:
46234 if (par->vbe_modes)
46235 kfree(par->vbe_modes);
46236
46237 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46238 + if (par->pmi_code)
46239 + module_free_exec(NULL, par->pmi_code);
46240 +#endif
46241 +
46242 framebuffer_release(info);
46243 return err;
46244 }
46245 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46246 kfree(par->vbe_state_orig);
46247 if (par->vbe_state_saved)
46248 kfree(par->vbe_state_saved);
46249 +
46250 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46251 + if (par->pmi_code)
46252 + module_free_exec(NULL, par->pmi_code);
46253 +#endif
46254 +
46255 }
46256
46257 framebuffer_release(info);
46258 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46259 index bd37ee1..cb827e8 100644
46260 --- a/drivers/video/vesafb.c
46261 +++ b/drivers/video/vesafb.c
46262 @@ -9,6 +9,7 @@
46263 */
46264
46265 #include <linux/module.h>
46266 +#include <linux/moduleloader.h>
46267 #include <linux/kernel.h>
46268 #include <linux/errno.h>
46269 #include <linux/string.h>
46270 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46271 static int vram_total __initdata; /* Set total amount of memory */
46272 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46273 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46274 -static void (*pmi_start)(void) __read_mostly;
46275 -static void (*pmi_pal) (void) __read_mostly;
46276 +static void (*pmi_start)(void) __read_only;
46277 +static void (*pmi_pal) (void) __read_only;
46278 static int depth __read_mostly;
46279 static int vga_compat __read_mostly;
46280 /* --------------------------------------------------------------------- */
46281 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46282 unsigned int size_vmode;
46283 unsigned int size_remap;
46284 unsigned int size_total;
46285 + void *pmi_code = NULL;
46286
46287 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46288 return -ENODEV;
46289 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46290 size_remap = size_total;
46291 vesafb_fix.smem_len = size_remap;
46292
46293 -#ifndef __i386__
46294 - screen_info.vesapm_seg = 0;
46295 -#endif
46296 -
46297 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46298 printk(KERN_WARNING
46299 "vesafb: cannot reserve video memory at 0x%lx\n",
46300 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46301 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46302 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46303
46304 +#ifdef __i386__
46305 +
46306 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46307 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46308 + if (!pmi_code)
46309 +#elif !defined(CONFIG_PAX_KERNEXEC)
46310 + if (0)
46311 +#endif
46312 +
46313 +#endif
46314 + screen_info.vesapm_seg = 0;
46315 +
46316 if (screen_info.vesapm_seg) {
46317 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46318 - screen_info.vesapm_seg,screen_info.vesapm_off);
46319 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46320 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46321 }
46322
46323 if (screen_info.vesapm_seg < 0xc000)
46324 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46325
46326 if (ypan || pmi_setpal) {
46327 unsigned short *pmi_base;
46328 +
46329 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46330 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46331 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46332 +
46333 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46334 + pax_open_kernel();
46335 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46336 +#else
46337 + pmi_code = pmi_base;
46338 +#endif
46339 +
46340 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46341 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46342 +
46343 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46344 + pmi_start = ktva_ktla(pmi_start);
46345 + pmi_pal = ktva_ktla(pmi_pal);
46346 + pax_close_kernel();
46347 +#endif
46348 +
46349 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46350 if (pmi_base[3]) {
46351 printk(KERN_INFO "vesafb: pmi: ports = ");
46352 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46353 info->node, info->fix.id);
46354 return 0;
46355 err:
46356 +
46357 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46358 + module_free_exec(NULL, pmi_code);
46359 +#endif
46360 +
46361 if (info->screen_base)
46362 iounmap(info->screen_base);
46363 framebuffer_release(info);
46364 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46365 index 88a60e0..6783cc2 100644
46366 --- a/drivers/xen/sys-hypervisor.c
46367 +++ b/drivers/xen/sys-hypervisor.c
46368 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46369 return 0;
46370 }
46371
46372 -static struct sysfs_ops hyp_sysfs_ops = {
46373 +static const struct sysfs_ops hyp_sysfs_ops = {
46374 .show = hyp_sysfs_show,
46375 .store = hyp_sysfs_store,
46376 };
46377 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46378 index 18f74ec..3227009 100644
46379 --- a/fs/9p/vfs_inode.c
46380 +++ b/fs/9p/vfs_inode.c
46381 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46382 static void
46383 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46384 {
46385 - char *s = nd_get_link(nd);
46386 + const char *s = nd_get_link(nd);
46387
46388 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46389 IS_ERR(s) ? "<error>" : s);
46390 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46391 index bb4cc5b..df5eaa0 100644
46392 --- a/fs/Kconfig.binfmt
46393 +++ b/fs/Kconfig.binfmt
46394 @@ -86,7 +86,7 @@ config HAVE_AOUT
46395
46396 config BINFMT_AOUT
46397 tristate "Kernel support for a.out and ECOFF binaries"
46398 - depends on HAVE_AOUT
46399 + depends on HAVE_AOUT && BROKEN
46400 ---help---
46401 A.out (Assembler.OUTput) is a set of formats for libraries and
46402 executables used in the earliest versions of UNIX. Linux used
46403 diff --git a/fs/aio.c b/fs/aio.c
46404 index 22a19ad..d484e5b 100644
46405 --- a/fs/aio.c
46406 +++ b/fs/aio.c
46407 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46408 size += sizeof(struct io_event) * nr_events;
46409 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46410
46411 - if (nr_pages < 0)
46412 + if (nr_pages <= 0)
46413 return -EINVAL;
46414
46415 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46416 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46417 struct aio_timeout to;
46418 int retry = 0;
46419
46420 + pax_track_stack();
46421 +
46422 /* needed to zero any padding within an entry (there shouldn't be
46423 * any, but C is fun!
46424 */
46425 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46426 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46427 {
46428 ssize_t ret;
46429 + struct iovec iovstack;
46430
46431 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46432 kiocb->ki_nbytes, 1,
46433 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46434 + &iovstack, &kiocb->ki_iovec);
46435 if (ret < 0)
46436 goto out;
46437
46438 + if (kiocb->ki_iovec == &iovstack) {
46439 + kiocb->ki_inline_vec = iovstack;
46440 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46441 + }
46442 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46443 kiocb->ki_cur_seg = 0;
46444 /* ki_nbytes/left now reflect bytes instead of segs */
46445 diff --git a/fs/attr.c b/fs/attr.c
46446 index 96d394b..33cf5b4 100644
46447 --- a/fs/attr.c
46448 +++ b/fs/attr.c
46449 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46450 unsigned long limit;
46451
46452 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46453 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46454 if (limit != RLIM_INFINITY && offset > limit)
46455 goto out_sig;
46456 if (offset > inode->i_sb->s_maxbytes)
46457 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46458 index 4a1401c..05eb5ca 100644
46459 --- a/fs/autofs/root.c
46460 +++ b/fs/autofs/root.c
46461 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46462 set_bit(n,sbi->symlink_bitmap);
46463 sl = &sbi->symlink[n];
46464 sl->len = strlen(symname);
46465 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46466 + slsize = sl->len+1;
46467 + sl->data = kmalloc(slsize, GFP_KERNEL);
46468 if (!sl->data) {
46469 clear_bit(n,sbi->symlink_bitmap);
46470 unlock_kernel();
46471 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46472 index b4ea829..e63ef18 100644
46473 --- a/fs/autofs4/symlink.c
46474 +++ b/fs/autofs4/symlink.c
46475 @@ -15,7 +15,7 @@
46476 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46477 {
46478 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46479 - nd_set_link(nd, (char *)ino->u.symlink);
46480 + nd_set_link(nd, ino->u.symlink);
46481 return NULL;
46482 }
46483
46484 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46485 index 136a0d6..a287331 100644
46486 --- a/fs/autofs4/waitq.c
46487 +++ b/fs/autofs4/waitq.c
46488 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46489 {
46490 unsigned long sigpipe, flags;
46491 mm_segment_t fs;
46492 - const char *data = (const char *)addr;
46493 + const char __user *data = (const char __force_user *)addr;
46494 ssize_t wr = 0;
46495
46496 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46497 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46498 index 9158c07..3f06659 100644
46499 --- a/fs/befs/linuxvfs.c
46500 +++ b/fs/befs/linuxvfs.c
46501 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46502 {
46503 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46504 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46505 - char *link = nd_get_link(nd);
46506 + const char *link = nd_get_link(nd);
46507 if (!IS_ERR(link))
46508 kfree(link);
46509 }
46510 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46511 index 0133b5a..3710d09 100644
46512 --- a/fs/binfmt_aout.c
46513 +++ b/fs/binfmt_aout.c
46514 @@ -16,6 +16,7 @@
46515 #include <linux/string.h>
46516 #include <linux/fs.h>
46517 #include <linux/file.h>
46518 +#include <linux/security.h>
46519 #include <linux/stat.h>
46520 #include <linux/fcntl.h>
46521 #include <linux/ptrace.h>
46522 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46523 #endif
46524 # define START_STACK(u) (u.start_stack)
46525
46526 + memset(&dump, 0, sizeof(dump));
46527 +
46528 fs = get_fs();
46529 set_fs(KERNEL_DS);
46530 has_dumped = 1;
46531 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46532
46533 /* If the size of the dump file exceeds the rlimit, then see what would happen
46534 if we wrote the stack, but not the data area. */
46535 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46536 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46537 dump.u_dsize = 0;
46538
46539 /* Make sure we have enough room to write the stack and data areas. */
46540 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46541 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46542 dump.u_ssize = 0;
46543
46544 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46545 dump_size = dump.u_ssize << PAGE_SHIFT;
46546 DUMP_WRITE(dump_start,dump_size);
46547 }
46548 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46549 - set_fs(KERNEL_DS);
46550 - DUMP_WRITE(current,sizeof(*current));
46551 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46552 end_coredump:
46553 set_fs(fs);
46554 return has_dumped;
46555 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46556 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46557 if (rlim >= RLIM_INFINITY)
46558 rlim = ~0;
46559 +
46560 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46561 if (ex.a_data + ex.a_bss > rlim)
46562 return -ENOMEM;
46563
46564 @@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46565 current->mm->free_area_cache = current->mm->mmap_base;
46566 current->mm->cached_hole_size = 0;
46567
46568 + retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
46569 + if (retval < 0) {
46570 + /* Someone check-me: is this error path enough? */
46571 + send_sig(SIGKILL, current, 0);
46572 + return retval;
46573 + }
46574 +
46575 install_exec_creds(bprm);
46576 current->flags &= ~PF_FORKNOEXEC;
46577
46578 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46579 + current->mm->pax_flags = 0UL;
46580 +#endif
46581 +
46582 +#ifdef CONFIG_PAX_PAGEEXEC
46583 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46584 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46585 +
46586 +#ifdef CONFIG_PAX_EMUTRAMP
46587 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46588 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46589 +#endif
46590 +
46591 +#ifdef CONFIG_PAX_MPROTECT
46592 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46593 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46594 +#endif
46595 +
46596 + }
46597 +#endif
46598 +
46599 if (N_MAGIC(ex) == OMAGIC) {
46600 unsigned long text_addr, map_size;
46601 loff_t pos;
46602 @@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46603
46604 down_write(&current->mm->mmap_sem);
46605 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46606 - PROT_READ | PROT_WRITE | PROT_EXEC,
46607 + PROT_READ | PROT_WRITE,
46608 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46609 fd_offset + ex.a_text);
46610 up_write(&current->mm->mmap_sem);
46611 @@ -367,13 +400,6 @@ beyond_if:
46612 return retval;
46613 }
46614
46615 - retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
46616 - if (retval < 0) {
46617 - /* Someone check-me: is this error path enough? */
46618 - send_sig(SIGKILL, current, 0);
46619 - return retval;
46620 - }
46621 -
46622 current->mm->start_stack =
46623 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
46624 #ifdef __alpha__
46625 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46626 index 1ed37ba..66794b9 100644
46627 --- a/fs/binfmt_elf.c
46628 +++ b/fs/binfmt_elf.c
46629 @@ -31,6 +31,7 @@
46630 #include <linux/random.h>
46631 #include <linux/elf.h>
46632 #include <linux/utsname.h>
46633 +#include <linux/xattr.h>
46634 #include <asm/uaccess.h>
46635 #include <asm/param.h>
46636 #include <asm/page.h>
46637 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46638 #define elf_core_dump NULL
46639 #endif
46640
46641 +#ifdef CONFIG_PAX_MPROTECT
46642 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46643 +#endif
46644 +
46645 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46646 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46647 #else
46648 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46649 .load_binary = load_elf_binary,
46650 .load_shlib = load_elf_library,
46651 .core_dump = elf_core_dump,
46652 +
46653 +#ifdef CONFIG_PAX_MPROTECT
46654 + .handle_mprotect= elf_handle_mprotect,
46655 +#endif
46656 +
46657 .min_coredump = ELF_EXEC_PAGESIZE,
46658 .hasvdso = 1
46659 };
46660 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46661
46662 static int set_brk(unsigned long start, unsigned long end)
46663 {
46664 + unsigned long e = end;
46665 +
46666 start = ELF_PAGEALIGN(start);
46667 end = ELF_PAGEALIGN(end);
46668 if (end > start) {
46669 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46670 if (BAD_ADDR(addr))
46671 return addr;
46672 }
46673 - current->mm->start_brk = current->mm->brk = end;
46674 + current->mm->start_brk = current->mm->brk = e;
46675 return 0;
46676 }
46677
46678 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46679 elf_addr_t __user *u_rand_bytes;
46680 const char *k_platform = ELF_PLATFORM;
46681 const char *k_base_platform = ELF_BASE_PLATFORM;
46682 - unsigned char k_rand_bytes[16];
46683 + u32 k_rand_bytes[4];
46684 int items;
46685 elf_addr_t *elf_info;
46686 int ei_index = 0;
46687 const struct cred *cred = current_cred();
46688 struct vm_area_struct *vma;
46689 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46690 +
46691 + pax_track_stack();
46692
46693 /*
46694 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46695 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46696 * Generate 16 random bytes for userspace PRNG seeding.
46697 */
46698 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46699 - u_rand_bytes = (elf_addr_t __user *)
46700 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46701 + srandom32(k_rand_bytes[0] ^ random32());
46702 + srandom32(k_rand_bytes[1] ^ random32());
46703 + srandom32(k_rand_bytes[2] ^ random32());
46704 + srandom32(k_rand_bytes[3] ^ random32());
46705 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46706 + u_rand_bytes = (elf_addr_t __user *) p;
46707 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46708 return -EFAULT;
46709
46710 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46711 return -EFAULT;
46712 current->mm->env_end = p;
46713
46714 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46715 +
46716 /* Put the elf_info on the stack in the right place. */
46717 sp = (elf_addr_t __user *)envp + 1;
46718 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46719 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46720 return -EFAULT;
46721 return 0;
46722 }
46723 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46724 {
46725 struct elf_phdr *elf_phdata;
46726 struct elf_phdr *eppnt;
46727 - unsigned long load_addr = 0;
46728 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46729 int load_addr_set = 0;
46730 unsigned long last_bss = 0, elf_bss = 0;
46731 - unsigned long error = ~0UL;
46732 + unsigned long error = -EINVAL;
46733 unsigned long total_size;
46734 int retval, i, size;
46735
46736 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46737 goto out_close;
46738 }
46739
46740 +#ifdef CONFIG_PAX_SEGMEXEC
46741 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46742 + pax_task_size = SEGMEXEC_TASK_SIZE;
46743 +#endif
46744 +
46745 eppnt = elf_phdata;
46746 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46747 if (eppnt->p_type == PT_LOAD) {
46748 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46749 k = load_addr + eppnt->p_vaddr;
46750 if (BAD_ADDR(k) ||
46751 eppnt->p_filesz > eppnt->p_memsz ||
46752 - eppnt->p_memsz > TASK_SIZE ||
46753 - TASK_SIZE - eppnt->p_memsz < k) {
46754 + eppnt->p_memsz > pax_task_size ||
46755 + pax_task_size - eppnt->p_memsz < k) {
46756 error = -ENOMEM;
46757 goto out_close;
46758 }
46759 @@ -532,6 +558,351 @@ out:
46760 return error;
46761 }
46762
46763 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46764 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46765 +{
46766 + unsigned long pax_flags = 0UL;
46767 +
46768 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46769 +
46770 +#ifdef CONFIG_PAX_PAGEEXEC
46771 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46772 + pax_flags |= MF_PAX_PAGEEXEC;
46773 +#endif
46774 +
46775 +#ifdef CONFIG_PAX_SEGMEXEC
46776 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46777 + pax_flags |= MF_PAX_SEGMEXEC;
46778 +#endif
46779 +
46780 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46781 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46782 + if (nx_enabled)
46783 + pax_flags &= ~MF_PAX_SEGMEXEC;
46784 + else
46785 + pax_flags &= ~MF_PAX_PAGEEXEC;
46786 + }
46787 +#endif
46788 +
46789 +#ifdef CONFIG_PAX_EMUTRAMP
46790 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46791 + pax_flags |= MF_PAX_EMUTRAMP;
46792 +#endif
46793 +
46794 +#ifdef CONFIG_PAX_MPROTECT
46795 + if (elf_phdata->p_flags & PF_MPROTECT)
46796 + pax_flags |= MF_PAX_MPROTECT;
46797 +#endif
46798 +
46799 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46800 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46801 + pax_flags |= MF_PAX_RANDMMAP;
46802 +#endif
46803 +
46804 +#endif
46805 +
46806 + return pax_flags;
46807 +}
46808 +
46809 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46810 +{
46811 + unsigned long pax_flags = 0UL;
46812 +
46813 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46814 +
46815 +#ifdef CONFIG_PAX_PAGEEXEC
46816 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46817 + pax_flags |= MF_PAX_PAGEEXEC;
46818 +#endif
46819 +
46820 +#ifdef CONFIG_PAX_SEGMEXEC
46821 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46822 + pax_flags |= MF_PAX_SEGMEXEC;
46823 +#endif
46824 +
46825 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46826 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46827 + if (nx_enabled)
46828 + pax_flags &= ~MF_PAX_SEGMEXEC;
46829 + else
46830 + pax_flags &= ~MF_PAX_PAGEEXEC;
46831 + }
46832 +#endif
46833 +
46834 +#ifdef CONFIG_PAX_EMUTRAMP
46835 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46836 + pax_flags |= MF_PAX_EMUTRAMP;
46837 +#endif
46838 +
46839 +#ifdef CONFIG_PAX_MPROTECT
46840 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46841 + pax_flags |= MF_PAX_MPROTECT;
46842 +#endif
46843 +
46844 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46845 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46846 + pax_flags |= MF_PAX_RANDMMAP;
46847 +#endif
46848 +
46849 +#endif
46850 +
46851 + return pax_flags;
46852 +}
46853 +
46854 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46855 +{
46856 + unsigned long pax_flags = 0UL;
46857 +
46858 +#ifdef CONFIG_PAX_EI_PAX
46859 +
46860 +#ifdef CONFIG_PAX_PAGEEXEC
46861 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46862 + pax_flags |= MF_PAX_PAGEEXEC;
46863 +#endif
46864 +
46865 +#ifdef CONFIG_PAX_SEGMEXEC
46866 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46867 + pax_flags |= MF_PAX_SEGMEXEC;
46868 +#endif
46869 +
46870 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46871 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46872 + if (nx_enabled)
46873 + pax_flags &= ~MF_PAX_SEGMEXEC;
46874 + else
46875 + pax_flags &= ~MF_PAX_PAGEEXEC;
46876 + }
46877 +#endif
46878 +
46879 +#ifdef CONFIG_PAX_EMUTRAMP
46880 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46881 + pax_flags |= MF_PAX_EMUTRAMP;
46882 +#endif
46883 +
46884 +#ifdef CONFIG_PAX_MPROTECT
46885 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46886 + pax_flags |= MF_PAX_MPROTECT;
46887 +#endif
46888 +
46889 +#ifdef CONFIG_PAX_ASLR
46890 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46891 + pax_flags |= MF_PAX_RANDMMAP;
46892 +#endif
46893 +
46894 +#else
46895 +
46896 +#ifdef CONFIG_PAX_PAGEEXEC
46897 + pax_flags |= MF_PAX_PAGEEXEC;
46898 +#endif
46899 +
46900 +#ifdef CONFIG_PAX_MPROTECT
46901 + pax_flags |= MF_PAX_MPROTECT;
46902 +#endif
46903 +
46904 +#ifdef CONFIG_PAX_RANDMMAP
46905 + pax_flags |= MF_PAX_RANDMMAP;
46906 +#endif
46907 +
46908 +#ifdef CONFIG_PAX_SEGMEXEC
46909 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
46910 + pax_flags &= ~MF_PAX_PAGEEXEC;
46911 + pax_flags |= MF_PAX_SEGMEXEC;
46912 + }
46913 +#endif
46914 +
46915 +#endif
46916 +
46917 + return pax_flags;
46918 +}
46919 +
46920 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46921 +{
46922 +
46923 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46924 + unsigned long i;
46925 +
46926 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46927 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46928 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46929 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46930 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46931 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46932 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46933 + return ~0UL;
46934 +
46935 +#ifdef CONFIG_PAX_SOFTMODE
46936 + if (pax_softmode)
46937 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46938 + else
46939 +#endif
46940 +
46941 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46942 + break;
46943 + }
46944 +#endif
46945 +
46946 + return ~0UL;
46947 +}
46948 +
46949 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46950 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46951 +{
46952 + unsigned long pax_flags = 0UL;
46953 +
46954 +#ifdef CONFIG_PAX_PAGEEXEC
46955 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46956 + pax_flags |= MF_PAX_PAGEEXEC;
46957 +#endif
46958 +
46959 +#ifdef CONFIG_PAX_SEGMEXEC
46960 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46961 + pax_flags |= MF_PAX_SEGMEXEC;
46962 +#endif
46963 +
46964 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46965 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46966 + if ((__supported_pte_mask & _PAGE_NX))
46967 + pax_flags &= ~MF_PAX_SEGMEXEC;
46968 + else
46969 + pax_flags &= ~MF_PAX_PAGEEXEC;
46970 + }
46971 +#endif
46972 +
46973 +#ifdef CONFIG_PAX_EMUTRAMP
46974 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46975 + pax_flags |= MF_PAX_EMUTRAMP;
46976 +#endif
46977 +
46978 +#ifdef CONFIG_PAX_MPROTECT
46979 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46980 + pax_flags |= MF_PAX_MPROTECT;
46981 +#endif
46982 +
46983 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46984 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46985 + pax_flags |= MF_PAX_RANDMMAP;
46986 +#endif
46987 +
46988 + return pax_flags;
46989 +}
46990 +
46991 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46992 +{
46993 + unsigned long pax_flags = 0UL;
46994 +
46995 +#ifdef CONFIG_PAX_PAGEEXEC
46996 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46997 + pax_flags |= MF_PAX_PAGEEXEC;
46998 +#endif
46999 +
47000 +#ifdef CONFIG_PAX_SEGMEXEC
47001 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
47002 + pax_flags |= MF_PAX_SEGMEXEC;
47003 +#endif
47004 +
47005 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47006 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47007 + if ((__supported_pte_mask & _PAGE_NX))
47008 + pax_flags &= ~MF_PAX_SEGMEXEC;
47009 + else
47010 + pax_flags &= ~MF_PAX_PAGEEXEC;
47011 + }
47012 +#endif
47013 +
47014 +#ifdef CONFIG_PAX_EMUTRAMP
47015 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
47016 + pax_flags |= MF_PAX_EMUTRAMP;
47017 +#endif
47018 +
47019 +#ifdef CONFIG_PAX_MPROTECT
47020 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
47021 + pax_flags |= MF_PAX_MPROTECT;
47022 +#endif
47023 +
47024 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47025 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
47026 + pax_flags |= MF_PAX_RANDMMAP;
47027 +#endif
47028 +
47029 + return pax_flags;
47030 +}
47031 +#endif
47032 +
47033 +static unsigned long pax_parse_xattr_pax(struct file * const file)
47034 +{
47035 +
47036 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47037 + ssize_t xattr_size, i;
47038 + unsigned char xattr_value[5];
47039 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
47040 +
47041 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
47042 + if (xattr_size <= 0)
47043 + return ~0UL;
47044 +
47045 + for (i = 0; i < xattr_size; i++)
47046 + switch (xattr_value[i]) {
47047 + default:
47048 + return ~0UL;
47049 +
47050 +#define parse_flag(option1, option2, flag) \
47051 + case option1: \
47052 + pax_flags_hardmode |= MF_PAX_##flag; \
47053 + break; \
47054 + case option2: \
47055 + pax_flags_softmode |= MF_PAX_##flag; \
47056 + break;
47057 +
47058 + parse_flag('p', 'P', PAGEEXEC);
47059 + parse_flag('e', 'E', EMUTRAMP);
47060 + parse_flag('m', 'M', MPROTECT);
47061 + parse_flag('r', 'R', RANDMMAP);
47062 + parse_flag('s', 'S', SEGMEXEC);
47063 +
47064 +#undef parse_flag
47065 + }
47066 +
47067 + if (pax_flags_hardmode & pax_flags_softmode)
47068 + return ~0UL;
47069 +
47070 +#ifdef CONFIG_PAX_SOFTMODE
47071 + if (pax_softmode)
47072 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47073 + else
47074 +#endif
47075 +
47076 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47077 +#else
47078 + return ~0UL;
47079 +#endif
47080 +
47081 +}
47082 +
47083 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47084 +{
47085 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47086 +
47087 + pax_flags = pax_parse_ei_pax(elf_ex);
47088 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47089 + xattr_pax_flags = pax_parse_xattr_pax(file);
47090 +
47091 + if (pt_pax_flags == ~0UL)
47092 + pt_pax_flags = xattr_pax_flags;
47093 + else if (xattr_pax_flags == ~0UL)
47094 + xattr_pax_flags = pt_pax_flags;
47095 + if (pt_pax_flags != xattr_pax_flags)
47096 + return -EINVAL;
47097 + if (pt_pax_flags != ~0UL)
47098 + pax_flags = pt_pax_flags;
47099 +
47100 + if (0 > pax_check_flags(&pax_flags))
47101 + return -EINVAL;
47102 +
47103 + current->mm->pax_flags = pax_flags;
47104 + return 0;
47105 +}
47106 +#endif
47107 +
47108 /*
47109 * These are the functions used to load ELF style executables and shared
47110 * libraries. There is no binary dependent code anywhere else.
47111 @@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47112 {
47113 unsigned int random_variable = 0;
47114
47115 +#ifdef CONFIG_PAX_RANDUSTACK
47116 + if (randomize_va_space)
47117 + return stack_top - current->mm->delta_stack;
47118 +#endif
47119 +
47120 if ((current->flags & PF_RANDOMIZE) &&
47121 !(current->personality & ADDR_NO_RANDOMIZE)) {
47122 random_variable = get_random_int() & STACK_RND_MASK;
47123 @@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47124 unsigned long load_addr = 0, load_bias = 0;
47125 int load_addr_set = 0;
47126 char * elf_interpreter = NULL;
47127 - unsigned long error;
47128 + unsigned long error = 0;
47129 struct elf_phdr *elf_ppnt, *elf_phdata;
47130 unsigned long elf_bss, elf_brk;
47131 int retval, i;
47132 @@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47133 unsigned long start_code, end_code, start_data, end_data;
47134 unsigned long reloc_func_desc = 0;
47135 int executable_stack = EXSTACK_DEFAULT;
47136 - unsigned long def_flags = 0;
47137 struct {
47138 struct elfhdr elf_ex;
47139 struct elfhdr interp_elf_ex;
47140 } *loc;
47141 + unsigned long pax_task_size = TASK_SIZE;
47142
47143 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47144 if (!loc) {
47145 @@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47146
47147 /* OK, This is the point of no return */
47148 current->flags &= ~PF_FORKNOEXEC;
47149 - current->mm->def_flags = def_flags;
47150 +
47151 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47152 + current->mm->pax_flags = 0UL;
47153 +#endif
47154 +
47155 +#ifdef CONFIG_PAX_DLRESOLVE
47156 + current->mm->call_dl_resolve = 0UL;
47157 +#endif
47158 +
47159 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47160 + current->mm->call_syscall = 0UL;
47161 +#endif
47162 +
47163 +#ifdef CONFIG_PAX_ASLR
47164 + current->mm->delta_mmap = 0UL;
47165 + current->mm->delta_stack = 0UL;
47166 +#endif
47167 +
47168 + current->mm->def_flags = 0;
47169 +
47170 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47171 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47172 + send_sig(SIGKILL, current, 0);
47173 + goto out_free_dentry;
47174 + }
47175 +#endif
47176 +
47177 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47178 + pax_set_initial_flags(bprm);
47179 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47180 + if (pax_set_initial_flags_func)
47181 + (pax_set_initial_flags_func)(bprm);
47182 +#endif
47183 +
47184 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47185 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47186 + current->mm->context.user_cs_limit = PAGE_SIZE;
47187 + current->mm->def_flags |= VM_PAGEEXEC;
47188 + }
47189 +#endif
47190 +
47191 +#ifdef CONFIG_PAX_SEGMEXEC
47192 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47193 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47194 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47195 + pax_task_size = SEGMEXEC_TASK_SIZE;
47196 + }
47197 +#endif
47198 +
47199 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47200 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47201 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47202 + put_cpu();
47203 + }
47204 +#endif
47205
47206 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47207 may depend on the personality. */
47208 SET_PERSONALITY(loc->elf_ex);
47209 +
47210 +#ifdef CONFIG_PAX_ASLR
47211 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47212 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47213 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47214 + }
47215 +#endif
47216 +
47217 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47218 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47219 + executable_stack = EXSTACK_DISABLE_X;
47220 + current->personality &= ~READ_IMPLIES_EXEC;
47221 + } else
47222 +#endif
47223 +
47224 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47225 current->personality |= READ_IMPLIES_EXEC;
47226
47227 @@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47228 * might try to exec. This is because the brk will
47229 * follow the loader, and is not movable. */
47230 #ifdef CONFIG_X86
47231 - load_bias = 0;
47232 + if (current->flags & PF_RANDOMIZE)
47233 + load_bias = 0;
47234 + else
47235 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47236 #else
47237 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47238 #endif
47239 +
47240 +#ifdef CONFIG_PAX_RANDMMAP
47241 + /* PaX: randomize base address at the default exe base if requested */
47242 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47243 +#ifdef CONFIG_SPARC64
47244 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47245 +#else
47246 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47247 +#endif
47248 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47249 + elf_flags |= MAP_FIXED;
47250 + }
47251 +#endif
47252 +
47253 }
47254
47255 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47256 @@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47257 * allowed task size. Note that p_filesz must always be
47258 * <= p_memsz so it is only necessary to check p_memsz.
47259 */
47260 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47261 - elf_ppnt->p_memsz > TASK_SIZE ||
47262 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47263 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47264 + elf_ppnt->p_memsz > pax_task_size ||
47265 + pax_task_size - elf_ppnt->p_memsz < k) {
47266 /* set_brk can never work. Avoid overflows. */
47267 send_sig(SIGKILL, current, 0);
47268 retval = -EINVAL;
47269 @@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47270 start_data += load_bias;
47271 end_data += load_bias;
47272
47273 +#ifdef CONFIG_PAX_RANDMMAP
47274 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47275 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47276 +#endif
47277 +
47278 /* Calling set_brk effectively mmaps the pages that we need
47279 * for the bss and break sections. We must do this before
47280 * mapping in the interpreter, to make sure it doesn't wind
47281 @@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47282 goto out_free_dentry;
47283 }
47284 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47285 - send_sig(SIGSEGV, current, 0);
47286 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47287 - goto out_free_dentry;
47288 + /*
47289 + * This bss-zeroing can fail if the ELF
47290 + * file specifies odd protections. So
47291 + * we don't check the return value
47292 + */
47293 }
47294
47295 if (elf_interpreter) {
47296 @@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47297 unsigned long n = off;
47298 if (n > PAGE_SIZE)
47299 n = PAGE_SIZE;
47300 - if (!dump_write(file, buf, n))
47301 + if (!dump_write(file, buf, n)) {
47302 + free_page((unsigned long)buf);
47303 return 0;
47304 + }
47305 off -= n;
47306 }
47307 free_page((unsigned long)buf);
47308 @@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47309 * Decide what to dump of a segment, part, all or none.
47310 */
47311 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47312 - unsigned long mm_flags)
47313 + unsigned long mm_flags, long signr)
47314 {
47315 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47316
47317 @@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47318 if (vma->vm_file == NULL)
47319 return 0;
47320
47321 - if (FILTER(MAPPED_PRIVATE))
47322 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47323 goto whole;
47324
47325 /*
47326 @@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47327 #undef DUMP_WRITE
47328
47329 #define DUMP_WRITE(addr, nr) \
47330 + do { \
47331 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47332 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47333 - goto end_coredump;
47334 + goto end_coredump; \
47335 + } while (0);
47336
47337 static void fill_elf_header(struct elfhdr *elf, int segs,
47338 u16 machine, u32 flags, u8 osabi)
47339 @@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47340 {
47341 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47342 int i = 0;
47343 - do
47344 + do {
47345 i += 2;
47346 - while (auxv[i - 2] != AT_NULL);
47347 + } while (auxv[i - 2] != AT_NULL);
47348 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47349 }
47350
47351 @@ -1452,7 +1926,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
47352 for (i = 1; i < view->n; ++i) {
47353 const struct user_regset *regset = &view->regsets[i];
47354 do_thread_regset_writeback(t->task, regset);
47355 - if (regset->core_note_type &&
47356 + if (regset->core_note_type && regset->get &&
47357 (!regset->active || regset->active(t->task, regset))) {
47358 int ret;
47359 size_t size = regset->n * regset->size;
47360 @@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47361 phdr.p_offset = offset;
47362 phdr.p_vaddr = vma->vm_start;
47363 phdr.p_paddr = 0;
47364 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47365 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47366 phdr.p_memsz = vma->vm_end - vma->vm_start;
47367 offset += phdr.p_filesz;
47368 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47369 @@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47370 unsigned long addr;
47371 unsigned long end;
47372
47373 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47374 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47375
47376 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47377 struct page *page;
47378 @@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47379 page = get_dump_page(addr);
47380 if (page) {
47381 void *kaddr = kmap(page);
47382 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47383 stop = ((size += PAGE_SIZE) > limit) ||
47384 !dump_write(file, kaddr, PAGE_SIZE);
47385 kunmap(page);
47386 @@ -2042,6 +2517,97 @@ out:
47387
47388 #endif /* USE_ELF_CORE_DUMP */
47389
47390 +#ifdef CONFIG_PAX_MPROTECT
47391 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47392 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47393 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47394 + *
47395 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47396 + * basis because we want to allow the common case and not the special ones.
47397 + */
47398 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47399 +{
47400 + struct elfhdr elf_h;
47401 + struct elf_phdr elf_p;
47402 + unsigned long i;
47403 + unsigned long oldflags;
47404 + bool is_textrel_rw, is_textrel_rx, is_relro;
47405 +
47406 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47407 + return;
47408 +
47409 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47410 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47411 +
47412 +#ifdef CONFIG_PAX_ELFRELOCS
47413 + /* possible TEXTREL */
47414 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47415 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47416 +#else
47417 + is_textrel_rw = false;
47418 + is_textrel_rx = false;
47419 +#endif
47420 +
47421 + /* possible RELRO */
47422 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47423 +
47424 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47425 + return;
47426 +
47427 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47428 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47429 +
47430 +#ifdef CONFIG_PAX_ETEXECRELOCS
47431 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47432 +#else
47433 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47434 +#endif
47435 +
47436 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47437 + !elf_check_arch(&elf_h) ||
47438 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47439 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47440 + return;
47441 +
47442 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47443 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47444 + return;
47445 + switch (elf_p.p_type) {
47446 + case PT_DYNAMIC:
47447 + if (!is_textrel_rw && !is_textrel_rx)
47448 + continue;
47449 + i = 0UL;
47450 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47451 + elf_dyn dyn;
47452 +
47453 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47454 + return;
47455 + if (dyn.d_tag == DT_NULL)
47456 + return;
47457 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47458 + gr_log_textrel(vma);
47459 + if (is_textrel_rw)
47460 + vma->vm_flags |= VM_MAYWRITE;
47461 + else
47462 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47463 + vma->vm_flags &= ~VM_MAYWRITE;
47464 + return;
47465 + }
47466 + i++;
47467 + }
47468 + return;
47469 +
47470 + case PT_GNU_RELRO:
47471 + if (!is_relro)
47472 + continue;
47473 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47474 + vma->vm_flags &= ~VM_MAYWRITE;
47475 + return;
47476 + }
47477 + }
47478 +}
47479 +#endif
47480 +
47481 static int __init init_elf_binfmt(void)
47482 {
47483 return register_binfmt(&elf_format);
47484 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47485 index ca88c46..f155a60 100644
47486 --- a/fs/binfmt_flat.c
47487 +++ b/fs/binfmt_flat.c
47488 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47489 realdatastart = (unsigned long) -ENOMEM;
47490 printk("Unable to allocate RAM for process data, errno %d\n",
47491 (int)-realdatastart);
47492 + down_write(&current->mm->mmap_sem);
47493 do_munmap(current->mm, textpos, text_len);
47494 + up_write(&current->mm->mmap_sem);
47495 ret = realdatastart;
47496 goto err;
47497 }
47498 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47499 }
47500 if (IS_ERR_VALUE(result)) {
47501 printk("Unable to read data+bss, errno %d\n", (int)-result);
47502 + down_write(&current->mm->mmap_sem);
47503 do_munmap(current->mm, textpos, text_len);
47504 do_munmap(current->mm, realdatastart, data_len + extra);
47505 + up_write(&current->mm->mmap_sem);
47506 ret = result;
47507 goto err;
47508 }
47509 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47510 }
47511 if (IS_ERR_VALUE(result)) {
47512 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47513 + down_write(&current->mm->mmap_sem);
47514 do_munmap(current->mm, textpos, text_len + data_len + extra +
47515 MAX_SHARED_LIBS * sizeof(unsigned long));
47516 + up_write(&current->mm->mmap_sem);
47517 ret = result;
47518 goto err;
47519 }
47520 diff --git a/fs/bio.c b/fs/bio.c
47521 index e696713..83de133 100644
47522 --- a/fs/bio.c
47523 +++ b/fs/bio.c
47524 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47525
47526 i = 0;
47527 while (i < bio_slab_nr) {
47528 - struct bio_slab *bslab = &bio_slabs[i];
47529 + bslab = &bio_slabs[i];
47530
47531 if (!bslab->slab && entry == -1)
47532 entry = i;
47533 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47534 const int read = bio_data_dir(bio) == READ;
47535 struct bio_map_data *bmd = bio->bi_private;
47536 int i;
47537 - char *p = bmd->sgvecs[0].iov_base;
47538 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47539
47540 __bio_for_each_segment(bvec, bio, i, 0) {
47541 char *addr = page_address(bvec->bv_page);
47542 diff --git a/fs/block_dev.c b/fs/block_dev.c
47543 index e65efa2..04fae57 100644
47544 --- a/fs/block_dev.c
47545 +++ b/fs/block_dev.c
47546 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47547 else if (bdev->bd_contains == bdev)
47548 res = 0; /* is a whole device which isn't held */
47549
47550 - else if (bdev->bd_contains->bd_holder == bd_claim)
47551 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47552 res = 0; /* is a partition of a device that is being partitioned */
47553 else if (bdev->bd_contains->bd_holder != NULL)
47554 res = -EBUSY; /* is a partition of a held device */
47555 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47556 index c4bc570..42acd8d 100644
47557 --- a/fs/btrfs/ctree.c
47558 +++ b/fs/btrfs/ctree.c
47559 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47560 free_extent_buffer(buf);
47561 add_root_to_dirty_list(root);
47562 } else {
47563 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47564 - parent_start = parent->start;
47565 - else
47566 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47567 + if (parent)
47568 + parent_start = parent->start;
47569 + else
47570 + parent_start = 0;
47571 + } else
47572 parent_start = 0;
47573
47574 WARN_ON(trans->transid != btrfs_header_generation(parent));
47575 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47576
47577 ret = 0;
47578 if (slot == 0) {
47579 - struct btrfs_disk_key disk_key;
47580 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47581 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47582 }
47583 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47584 index f447188..59c17c5 100644
47585 --- a/fs/btrfs/disk-io.c
47586 +++ b/fs/btrfs/disk-io.c
47587 @@ -39,7 +39,7 @@
47588 #include "tree-log.h"
47589 #include "free-space-cache.h"
47590
47591 -static struct extent_io_ops btree_extent_io_ops;
47592 +static const struct extent_io_ops btree_extent_io_ops;
47593 static void end_workqueue_fn(struct btrfs_work *work);
47594 static void free_fs_root(struct btrfs_root *root);
47595
47596 @@ -2607,7 +2607,7 @@ out:
47597 return 0;
47598 }
47599
47600 -static struct extent_io_ops btree_extent_io_ops = {
47601 +static const struct extent_io_ops btree_extent_io_ops = {
47602 .write_cache_pages_lock_hook = btree_lock_page_hook,
47603 .readpage_end_io_hook = btree_readpage_end_io_hook,
47604 .submit_bio_hook = btree_submit_bio_hook,
47605 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47606 index 559f724..a026171 100644
47607 --- a/fs/btrfs/extent-tree.c
47608 +++ b/fs/btrfs/extent-tree.c
47609 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47610 u64 group_start = group->key.objectid;
47611 new_extents = kmalloc(sizeof(*new_extents),
47612 GFP_NOFS);
47613 + if (!new_extents) {
47614 + ret = -ENOMEM;
47615 + goto out;
47616 + }
47617 nr_extents = 1;
47618 ret = get_new_locations(reloc_inode,
47619 extent_key,
47620 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47621 index 36de250..7ec75c7 100644
47622 --- a/fs/btrfs/extent_io.h
47623 +++ b/fs/btrfs/extent_io.h
47624 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47625 struct bio *bio, int mirror_num,
47626 unsigned long bio_flags);
47627 struct extent_io_ops {
47628 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47629 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47630 u64 start, u64 end, int *page_started,
47631 unsigned long *nr_written);
47632 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47633 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47634 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47635 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47636 extent_submit_bio_hook_t *submit_bio_hook;
47637 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47638 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47639 size_t size, struct bio *bio,
47640 unsigned long bio_flags);
47641 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47642 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47643 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47644 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47645 u64 start, u64 end,
47646 struct extent_state *state);
47647 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47648 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47649 u64 start, u64 end,
47650 struct extent_state *state);
47651 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47652 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47653 struct extent_state *state);
47654 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47655 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47656 struct extent_state *state, int uptodate);
47657 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47658 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47659 unsigned long old, unsigned long bits);
47660 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47661 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47662 unsigned long bits);
47663 - int (*merge_extent_hook)(struct inode *inode,
47664 + int (* const merge_extent_hook)(struct inode *inode,
47665 struct extent_state *new,
47666 struct extent_state *other);
47667 - int (*split_extent_hook)(struct inode *inode,
47668 + int (* const split_extent_hook)(struct inode *inode,
47669 struct extent_state *orig, u64 split);
47670 - int (*write_cache_pages_lock_hook)(struct page *page);
47671 + int (* const write_cache_pages_lock_hook)(struct page *page);
47672 };
47673
47674 struct extent_io_tree {
47675 @@ -88,7 +88,7 @@ struct extent_io_tree {
47676 u64 dirty_bytes;
47677 spinlock_t lock;
47678 spinlock_t buffer_lock;
47679 - struct extent_io_ops *ops;
47680 + const struct extent_io_ops *ops;
47681 };
47682
47683 struct extent_state {
47684 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47685 index cb2849f..3718fb4 100644
47686 --- a/fs/btrfs/free-space-cache.c
47687 +++ b/fs/btrfs/free-space-cache.c
47688 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47689
47690 while(1) {
47691 if (entry->bytes < bytes || entry->offset < min_start) {
47692 - struct rb_node *node;
47693 -
47694 node = rb_next(&entry->offset_index);
47695 if (!node)
47696 break;
47697 @@ -1226,7 +1224,7 @@ again:
47698 */
47699 while (entry->bitmap || found_bitmap ||
47700 (!entry->bitmap && entry->bytes < min_bytes)) {
47701 - struct rb_node *node = rb_next(&entry->offset_index);
47702 + node = rb_next(&entry->offset_index);
47703
47704 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47705 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47706 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47707 index e03a836..323837e 100644
47708 --- a/fs/btrfs/inode.c
47709 +++ b/fs/btrfs/inode.c
47710 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47711 static const struct address_space_operations btrfs_aops;
47712 static const struct address_space_operations btrfs_symlink_aops;
47713 static const struct file_operations btrfs_dir_file_operations;
47714 -static struct extent_io_ops btrfs_extent_io_ops;
47715 +static const struct extent_io_ops btrfs_extent_io_ops;
47716
47717 static struct kmem_cache *btrfs_inode_cachep;
47718 struct kmem_cache *btrfs_trans_handle_cachep;
47719 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47720 1, 0, NULL, GFP_NOFS);
47721 while (start < end) {
47722 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47723 + BUG_ON(!async_cow);
47724 async_cow->inode = inode;
47725 async_cow->root = root;
47726 async_cow->locked_page = locked_page;
47727 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47728 inline_size = btrfs_file_extent_inline_item_len(leaf,
47729 btrfs_item_nr(leaf, path->slots[0]));
47730 tmp = kmalloc(inline_size, GFP_NOFS);
47731 + if (!tmp)
47732 + return -ENOMEM;
47733 ptr = btrfs_file_extent_inline_start(item);
47734
47735 read_extent_buffer(leaf, tmp, ptr, inline_size);
47736 @@ -5410,7 +5413,7 @@ fail:
47737 return -ENOMEM;
47738 }
47739
47740 -static int btrfs_getattr(struct vfsmount *mnt,
47741 +int btrfs_getattr(struct vfsmount *mnt,
47742 struct dentry *dentry, struct kstat *stat)
47743 {
47744 struct inode *inode = dentry->d_inode;
47745 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47746 return 0;
47747 }
47748
47749 +EXPORT_SYMBOL(btrfs_getattr);
47750 +
47751 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47752 +{
47753 + return BTRFS_I(inode)->root->anon_super.s_dev;
47754 +}
47755 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47756 +
47757 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47758 struct inode *new_dir, struct dentry *new_dentry)
47759 {
47760 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47761 .fsync = btrfs_sync_file,
47762 };
47763
47764 -static struct extent_io_ops btrfs_extent_io_ops = {
47765 +static const struct extent_io_ops btrfs_extent_io_ops = {
47766 .fill_delalloc = run_delalloc_range,
47767 .submit_bio_hook = btrfs_submit_bio_hook,
47768 .merge_bio_hook = btrfs_merge_bio_hook,
47769 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47770 index ab7ab53..94e0781 100644
47771 --- a/fs/btrfs/relocation.c
47772 +++ b/fs/btrfs/relocation.c
47773 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47774 }
47775 spin_unlock(&rc->reloc_root_tree.lock);
47776
47777 - BUG_ON((struct btrfs_root *)node->data != root);
47778 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47779
47780 if (!del) {
47781 spin_lock(&rc->reloc_root_tree.lock);
47782 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47783 index a240b6f..4ce16ef 100644
47784 --- a/fs/btrfs/sysfs.c
47785 +++ b/fs/btrfs/sysfs.c
47786 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47787 complete(&root->kobj_unregister);
47788 }
47789
47790 -static struct sysfs_ops btrfs_super_attr_ops = {
47791 +static const struct sysfs_ops btrfs_super_attr_ops = {
47792 .show = btrfs_super_attr_show,
47793 .store = btrfs_super_attr_store,
47794 };
47795
47796 -static struct sysfs_ops btrfs_root_attr_ops = {
47797 +static const struct sysfs_ops btrfs_root_attr_ops = {
47798 .show = btrfs_root_attr_show,
47799 .store = btrfs_root_attr_store,
47800 };
47801 diff --git a/fs/buffer.c b/fs/buffer.c
47802 index 6fa5302..395d9f6 100644
47803 --- a/fs/buffer.c
47804 +++ b/fs/buffer.c
47805 @@ -25,6 +25,7 @@
47806 #include <linux/percpu.h>
47807 #include <linux/slab.h>
47808 #include <linux/capability.h>
47809 +#include <linux/security.h>
47810 #include <linux/blkdev.h>
47811 #include <linux/file.h>
47812 #include <linux/quotaops.h>
47813 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47814 index 3797e00..ce776f6 100644
47815 --- a/fs/cachefiles/bind.c
47816 +++ b/fs/cachefiles/bind.c
47817 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47818 args);
47819
47820 /* start by checking things over */
47821 - ASSERT(cache->fstop_percent >= 0 &&
47822 - cache->fstop_percent < cache->fcull_percent &&
47823 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47824 cache->fcull_percent < cache->frun_percent &&
47825 cache->frun_percent < 100);
47826
47827 - ASSERT(cache->bstop_percent >= 0 &&
47828 - cache->bstop_percent < cache->bcull_percent &&
47829 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47830 cache->bcull_percent < cache->brun_percent &&
47831 cache->brun_percent < 100);
47832
47833 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47834 index 4618516..bb30d01 100644
47835 --- a/fs/cachefiles/daemon.c
47836 +++ b/fs/cachefiles/daemon.c
47837 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47838 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47839 return -EIO;
47840
47841 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47842 + if (datalen > PAGE_SIZE - 1)
47843 return -EOPNOTSUPP;
47844
47845 /* drag the command string into the kernel so we can parse it */
47846 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47847 if (args[0] != '%' || args[1] != '\0')
47848 return -EINVAL;
47849
47850 - if (fstop < 0 || fstop >= cache->fcull_percent)
47851 + if (fstop >= cache->fcull_percent)
47852 return cachefiles_daemon_range_error(cache, args);
47853
47854 cache->fstop_percent = fstop;
47855 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47856 if (args[0] != '%' || args[1] != '\0')
47857 return -EINVAL;
47858
47859 - if (bstop < 0 || bstop >= cache->bcull_percent)
47860 + if (bstop >= cache->bcull_percent)
47861 return cachefiles_daemon_range_error(cache, args);
47862
47863 cache->bstop_percent = bstop;
47864 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47865 index f7c255f..fcd61de 100644
47866 --- a/fs/cachefiles/internal.h
47867 +++ b/fs/cachefiles/internal.h
47868 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47869 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47870 struct rb_root active_nodes; /* active nodes (can't be culled) */
47871 rwlock_t active_lock; /* lock for active_nodes */
47872 - atomic_t gravecounter; /* graveyard uniquifier */
47873 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47874 unsigned frun_percent; /* when to stop culling (% files) */
47875 unsigned fcull_percent; /* when to start culling (% files) */
47876 unsigned fstop_percent; /* when to stop allocating (% files) */
47877 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47878 * proc.c
47879 */
47880 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47881 -extern atomic_t cachefiles_lookup_histogram[HZ];
47882 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47883 -extern atomic_t cachefiles_create_histogram[HZ];
47884 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47885 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47886 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47887
47888 extern int __init cachefiles_proc_init(void);
47889 extern void cachefiles_proc_cleanup(void);
47890 static inline
47891 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47892 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47893 {
47894 unsigned long jif = jiffies - start_jif;
47895 if (jif >= HZ)
47896 jif = HZ - 1;
47897 - atomic_inc(&histogram[jif]);
47898 + atomic_inc_unchecked(&histogram[jif]);
47899 }
47900
47901 #else
47902 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47903 index 14ac480..a62766c 100644
47904 --- a/fs/cachefiles/namei.c
47905 +++ b/fs/cachefiles/namei.c
47906 @@ -250,7 +250,7 @@ try_again:
47907 /* first step is to make up a grave dentry in the graveyard */
47908 sprintf(nbuffer, "%08x%08x",
47909 (uint32_t) get_seconds(),
47910 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47911 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47912
47913 /* do the multiway lock magic */
47914 trap = lock_rename(cache->graveyard, dir);
47915 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47916 index eccd339..4c1d995 100644
47917 --- a/fs/cachefiles/proc.c
47918 +++ b/fs/cachefiles/proc.c
47919 @@ -14,9 +14,9 @@
47920 #include <linux/seq_file.h>
47921 #include "internal.h"
47922
47923 -atomic_t cachefiles_lookup_histogram[HZ];
47924 -atomic_t cachefiles_mkdir_histogram[HZ];
47925 -atomic_t cachefiles_create_histogram[HZ];
47926 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47927 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47928 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47929
47930 /*
47931 * display the latency histogram
47932 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47933 return 0;
47934 default:
47935 index = (unsigned long) v - 3;
47936 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47937 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47938 - z = atomic_read(&cachefiles_create_histogram[index]);
47939 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47940 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47941 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47942 if (x == 0 && y == 0 && z == 0)
47943 return 0;
47944
47945 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47946 index a6c8c6f..5cf8517 100644
47947 --- a/fs/cachefiles/rdwr.c
47948 +++ b/fs/cachefiles/rdwr.c
47949 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47950 old_fs = get_fs();
47951 set_fs(KERNEL_DS);
47952 ret = file->f_op->write(
47953 - file, (const void __user *) data, len, &pos);
47954 + file, (const void __force_user *) data, len, &pos);
47955 set_fs(old_fs);
47956 kunmap(page);
47957 if (ret != len)
47958 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47959 index 42cec2a..2aba466 100644
47960 --- a/fs/cifs/cifs_debug.c
47961 +++ b/fs/cifs/cifs_debug.c
47962 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47963 tcon = list_entry(tmp3,
47964 struct cifsTconInfo,
47965 tcon_list);
47966 - atomic_set(&tcon->num_smbs_sent, 0);
47967 - atomic_set(&tcon->num_writes, 0);
47968 - atomic_set(&tcon->num_reads, 0);
47969 - atomic_set(&tcon->num_oplock_brks, 0);
47970 - atomic_set(&tcon->num_opens, 0);
47971 - atomic_set(&tcon->num_posixopens, 0);
47972 - atomic_set(&tcon->num_posixmkdirs, 0);
47973 - atomic_set(&tcon->num_closes, 0);
47974 - atomic_set(&tcon->num_deletes, 0);
47975 - atomic_set(&tcon->num_mkdirs, 0);
47976 - atomic_set(&tcon->num_rmdirs, 0);
47977 - atomic_set(&tcon->num_renames, 0);
47978 - atomic_set(&tcon->num_t2renames, 0);
47979 - atomic_set(&tcon->num_ffirst, 0);
47980 - atomic_set(&tcon->num_fnext, 0);
47981 - atomic_set(&tcon->num_fclose, 0);
47982 - atomic_set(&tcon->num_hardlinks, 0);
47983 - atomic_set(&tcon->num_symlinks, 0);
47984 - atomic_set(&tcon->num_locks, 0);
47985 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47986 + atomic_set_unchecked(&tcon->num_writes, 0);
47987 + atomic_set_unchecked(&tcon->num_reads, 0);
47988 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47989 + atomic_set_unchecked(&tcon->num_opens, 0);
47990 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47991 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47992 + atomic_set_unchecked(&tcon->num_closes, 0);
47993 + atomic_set_unchecked(&tcon->num_deletes, 0);
47994 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47995 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47996 + atomic_set_unchecked(&tcon->num_renames, 0);
47997 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47998 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47999 + atomic_set_unchecked(&tcon->num_fnext, 0);
48000 + atomic_set_unchecked(&tcon->num_fclose, 0);
48001 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
48002 + atomic_set_unchecked(&tcon->num_symlinks, 0);
48003 + atomic_set_unchecked(&tcon->num_locks, 0);
48004 }
48005 }
48006 }
48007 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48008 if (tcon->need_reconnect)
48009 seq_puts(m, "\tDISCONNECTED ");
48010 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
48011 - atomic_read(&tcon->num_smbs_sent),
48012 - atomic_read(&tcon->num_oplock_brks));
48013 + atomic_read_unchecked(&tcon->num_smbs_sent),
48014 + atomic_read_unchecked(&tcon->num_oplock_brks));
48015 seq_printf(m, "\nReads: %d Bytes: %lld",
48016 - atomic_read(&tcon->num_reads),
48017 + atomic_read_unchecked(&tcon->num_reads),
48018 (long long)(tcon->bytes_read));
48019 seq_printf(m, "\nWrites: %d Bytes: %lld",
48020 - atomic_read(&tcon->num_writes),
48021 + atomic_read_unchecked(&tcon->num_writes),
48022 (long long)(tcon->bytes_written));
48023 seq_printf(m, "\nFlushes: %d",
48024 - atomic_read(&tcon->num_flushes));
48025 + atomic_read_unchecked(&tcon->num_flushes));
48026 seq_printf(m, "\nLocks: %d HardLinks: %d "
48027 "Symlinks: %d",
48028 - atomic_read(&tcon->num_locks),
48029 - atomic_read(&tcon->num_hardlinks),
48030 - atomic_read(&tcon->num_symlinks));
48031 + atomic_read_unchecked(&tcon->num_locks),
48032 + atomic_read_unchecked(&tcon->num_hardlinks),
48033 + atomic_read_unchecked(&tcon->num_symlinks));
48034 seq_printf(m, "\nOpens: %d Closes: %d "
48035 "Deletes: %d",
48036 - atomic_read(&tcon->num_opens),
48037 - atomic_read(&tcon->num_closes),
48038 - atomic_read(&tcon->num_deletes));
48039 + atomic_read_unchecked(&tcon->num_opens),
48040 + atomic_read_unchecked(&tcon->num_closes),
48041 + atomic_read_unchecked(&tcon->num_deletes));
48042 seq_printf(m, "\nPosix Opens: %d "
48043 "Posix Mkdirs: %d",
48044 - atomic_read(&tcon->num_posixopens),
48045 - atomic_read(&tcon->num_posixmkdirs));
48046 + atomic_read_unchecked(&tcon->num_posixopens),
48047 + atomic_read_unchecked(&tcon->num_posixmkdirs));
48048 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
48049 - atomic_read(&tcon->num_mkdirs),
48050 - atomic_read(&tcon->num_rmdirs));
48051 + atomic_read_unchecked(&tcon->num_mkdirs),
48052 + atomic_read_unchecked(&tcon->num_rmdirs));
48053 seq_printf(m, "\nRenames: %d T2 Renames %d",
48054 - atomic_read(&tcon->num_renames),
48055 - atomic_read(&tcon->num_t2renames));
48056 + atomic_read_unchecked(&tcon->num_renames),
48057 + atomic_read_unchecked(&tcon->num_t2renames));
48058 seq_printf(m, "\nFindFirst: %d FNext %d "
48059 "FClose %d",
48060 - atomic_read(&tcon->num_ffirst),
48061 - atomic_read(&tcon->num_fnext),
48062 - atomic_read(&tcon->num_fclose));
48063 + atomic_read_unchecked(&tcon->num_ffirst),
48064 + atomic_read_unchecked(&tcon->num_fnext),
48065 + atomic_read_unchecked(&tcon->num_fclose));
48066 }
48067 }
48068 }
48069 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48070 index 1445407..68cb0dc 100644
48071 --- a/fs/cifs/cifsfs.c
48072 +++ b/fs/cifs/cifsfs.c
48073 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
48074 cifs_req_cachep = kmem_cache_create("cifs_request",
48075 CIFSMaxBufSize +
48076 MAX_CIFS_HDR_SIZE, 0,
48077 - SLAB_HWCACHE_ALIGN, NULL);
48078 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48079 if (cifs_req_cachep == NULL)
48080 return -ENOMEM;
48081
48082 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
48083 efficient to alloc 1 per page off the slab compared to 17K (5page)
48084 alloc of large cifs buffers even when page debugging is on */
48085 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48086 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48087 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48088 NULL);
48089 if (cifs_sm_req_cachep == NULL) {
48090 mempool_destroy(cifs_req_poolp);
48091 @@ -991,8 +991,8 @@ init_cifs(void)
48092 atomic_set(&bufAllocCount, 0);
48093 atomic_set(&smBufAllocCount, 0);
48094 #ifdef CONFIG_CIFS_STATS2
48095 - atomic_set(&totBufAllocCount, 0);
48096 - atomic_set(&totSmBufAllocCount, 0);
48097 + atomic_set_unchecked(&totBufAllocCount, 0);
48098 + atomic_set_unchecked(&totSmBufAllocCount, 0);
48099 #endif /* CONFIG_CIFS_STATS2 */
48100
48101 atomic_set(&midCount, 0);
48102 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48103 index e29581e..1c22bab 100644
48104 --- a/fs/cifs/cifsglob.h
48105 +++ b/fs/cifs/cifsglob.h
48106 @@ -252,28 +252,28 @@ struct cifsTconInfo {
48107 __u16 Flags; /* optional support bits */
48108 enum statusEnum tidStatus;
48109 #ifdef CONFIG_CIFS_STATS
48110 - atomic_t num_smbs_sent;
48111 - atomic_t num_writes;
48112 - atomic_t num_reads;
48113 - atomic_t num_flushes;
48114 - atomic_t num_oplock_brks;
48115 - atomic_t num_opens;
48116 - atomic_t num_closes;
48117 - atomic_t num_deletes;
48118 - atomic_t num_mkdirs;
48119 - atomic_t num_posixopens;
48120 - atomic_t num_posixmkdirs;
48121 - atomic_t num_rmdirs;
48122 - atomic_t num_renames;
48123 - atomic_t num_t2renames;
48124 - atomic_t num_ffirst;
48125 - atomic_t num_fnext;
48126 - atomic_t num_fclose;
48127 - atomic_t num_hardlinks;
48128 - atomic_t num_symlinks;
48129 - atomic_t num_locks;
48130 - atomic_t num_acl_get;
48131 - atomic_t num_acl_set;
48132 + atomic_unchecked_t num_smbs_sent;
48133 + atomic_unchecked_t num_writes;
48134 + atomic_unchecked_t num_reads;
48135 + atomic_unchecked_t num_flushes;
48136 + atomic_unchecked_t num_oplock_brks;
48137 + atomic_unchecked_t num_opens;
48138 + atomic_unchecked_t num_closes;
48139 + atomic_unchecked_t num_deletes;
48140 + atomic_unchecked_t num_mkdirs;
48141 + atomic_unchecked_t num_posixopens;
48142 + atomic_unchecked_t num_posixmkdirs;
48143 + atomic_unchecked_t num_rmdirs;
48144 + atomic_unchecked_t num_renames;
48145 + atomic_unchecked_t num_t2renames;
48146 + atomic_unchecked_t num_ffirst;
48147 + atomic_unchecked_t num_fnext;
48148 + atomic_unchecked_t num_fclose;
48149 + atomic_unchecked_t num_hardlinks;
48150 + atomic_unchecked_t num_symlinks;
48151 + atomic_unchecked_t num_locks;
48152 + atomic_unchecked_t num_acl_get;
48153 + atomic_unchecked_t num_acl_set;
48154 #ifdef CONFIG_CIFS_STATS2
48155 unsigned long long time_writes;
48156 unsigned long long time_reads;
48157 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48158 }
48159
48160 #ifdef CONFIG_CIFS_STATS
48161 -#define cifs_stats_inc atomic_inc
48162 +#define cifs_stats_inc atomic_inc_unchecked
48163
48164 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48165 unsigned int bytes)
48166 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48167 /* Various Debug counters */
48168 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48169 #ifdef CONFIG_CIFS_STATS2
48170 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48171 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48172 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48173 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48174 #endif
48175 GLOBAL_EXTERN atomic_t smBufAllocCount;
48176 GLOBAL_EXTERN atomic_t midCount;
48177 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48178 index fc1e048..28b3441 100644
48179 --- a/fs/cifs/link.c
48180 +++ b/fs/cifs/link.c
48181 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48182
48183 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48184 {
48185 - char *p = nd_get_link(nd);
48186 + const char *p = nd_get_link(nd);
48187 if (!IS_ERR(p))
48188 kfree(p);
48189 }
48190 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48191 index 95b82e8..12a538d 100644
48192 --- a/fs/cifs/misc.c
48193 +++ b/fs/cifs/misc.c
48194 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48195 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48196 atomic_inc(&bufAllocCount);
48197 #ifdef CONFIG_CIFS_STATS2
48198 - atomic_inc(&totBufAllocCount);
48199 + atomic_inc_unchecked(&totBufAllocCount);
48200 #endif /* CONFIG_CIFS_STATS2 */
48201 }
48202
48203 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48204 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48205 atomic_inc(&smBufAllocCount);
48206 #ifdef CONFIG_CIFS_STATS2
48207 - atomic_inc(&totSmBufAllocCount);
48208 + atomic_inc_unchecked(&totSmBufAllocCount);
48209 #endif /* CONFIG_CIFS_STATS2 */
48210
48211 }
48212 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48213 index a5bf577..6d19845 100644
48214 --- a/fs/coda/cache.c
48215 +++ b/fs/coda/cache.c
48216 @@ -24,14 +24,14 @@
48217 #include <linux/coda_fs_i.h>
48218 #include <linux/coda_cache.h>
48219
48220 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48221 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48222
48223 /* replace or extend an acl cache hit */
48224 void coda_cache_enter(struct inode *inode, int mask)
48225 {
48226 struct coda_inode_info *cii = ITOC(inode);
48227
48228 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48229 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48230 if (cii->c_uid != current_fsuid()) {
48231 cii->c_uid = current_fsuid();
48232 cii->c_cached_perm = mask;
48233 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48234 void coda_cache_clear_inode(struct inode *inode)
48235 {
48236 struct coda_inode_info *cii = ITOC(inode);
48237 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48238 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48239 }
48240
48241 /* remove all acl caches */
48242 void coda_cache_clear_all(struct super_block *sb)
48243 {
48244 - atomic_inc(&permission_epoch);
48245 + atomic_inc_unchecked(&permission_epoch);
48246 }
48247
48248
48249 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48250
48251 hit = (mask & cii->c_cached_perm) == mask &&
48252 cii->c_uid == current_fsuid() &&
48253 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48254 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48255
48256 return hit;
48257 }
48258 diff --git a/fs/compat.c b/fs/compat.c
48259 index d1e2411..c2ef8ed 100644
48260 --- a/fs/compat.c
48261 +++ b/fs/compat.c
48262 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48263 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48264 {
48265 compat_ino_t ino = stat->ino;
48266 - typeof(ubuf->st_uid) uid = 0;
48267 - typeof(ubuf->st_gid) gid = 0;
48268 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48269 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48270 int err;
48271
48272 SET_UID(uid, stat->uid);
48273 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48274
48275 set_fs(KERNEL_DS);
48276 /* The __user pointer cast is valid because of the set_fs() */
48277 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48278 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48279 set_fs(oldfs);
48280 /* truncating is ok because it's a user address */
48281 if (!ret)
48282 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48283
48284 struct compat_readdir_callback {
48285 struct compat_old_linux_dirent __user *dirent;
48286 + struct file * file;
48287 int result;
48288 };
48289
48290 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48291 buf->result = -EOVERFLOW;
48292 return -EOVERFLOW;
48293 }
48294 +
48295 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48296 + return 0;
48297 +
48298 buf->result++;
48299 dirent = buf->dirent;
48300 if (!access_ok(VERIFY_WRITE, dirent,
48301 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48302
48303 buf.result = 0;
48304 buf.dirent = dirent;
48305 + buf.file = file;
48306
48307 error = vfs_readdir(file, compat_fillonedir, &buf);
48308 if (buf.result)
48309 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48310 struct compat_getdents_callback {
48311 struct compat_linux_dirent __user *current_dir;
48312 struct compat_linux_dirent __user *previous;
48313 + struct file * file;
48314 int count;
48315 int error;
48316 };
48317 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48318 buf->error = -EOVERFLOW;
48319 return -EOVERFLOW;
48320 }
48321 +
48322 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48323 + return 0;
48324 +
48325 dirent = buf->previous;
48326 if (dirent) {
48327 if (__put_user(offset, &dirent->d_off))
48328 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48329 buf.previous = NULL;
48330 buf.count = count;
48331 buf.error = 0;
48332 + buf.file = file;
48333
48334 error = vfs_readdir(file, compat_filldir, &buf);
48335 if (error >= 0)
48336 @@ -987,6 +999,7 @@ out:
48337 struct compat_getdents_callback64 {
48338 struct linux_dirent64 __user *current_dir;
48339 struct linux_dirent64 __user *previous;
48340 + struct file * file;
48341 int count;
48342 int error;
48343 };
48344 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48345 buf->error = -EINVAL; /* only used if we fail.. */
48346 if (reclen > buf->count)
48347 return -EINVAL;
48348 +
48349 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48350 + return 0;
48351 +
48352 dirent = buf->previous;
48353
48354 if (dirent) {
48355 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48356 buf.previous = NULL;
48357 buf.count = count;
48358 buf.error = 0;
48359 + buf.file = file;
48360
48361 error = vfs_readdir(file, compat_filldir64, &buf);
48362 if (error >= 0)
48363 error = buf.error;
48364 lastdirent = buf.previous;
48365 if (lastdirent) {
48366 - typeof(lastdirent->d_off) d_off = file->f_pos;
48367 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48368 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48369 error = -EFAULT;
48370 else
48371 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48372 * verify all the pointers
48373 */
48374 ret = -EINVAL;
48375 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48376 + if (nr_segs > UIO_MAXIOV)
48377 goto out;
48378 if (!file->f_op)
48379 goto out;
48380 @@ -1454,6 +1472,10 @@ out:
48381 return ret;
48382 }
48383
48384 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48385 +extern atomic64_unchecked_t global_exec_counter;
48386 +#endif
48387 +
48388 /*
48389 * compat_do_execve() is mostly a copy of do_execve(), with the exception
48390 * that it processes 32 bit argv and envp pointers.
48391 @@ -1463,11 +1485,35 @@ int compat_do_execve(char * filename,
48392 compat_uptr_t __user *envp,
48393 struct pt_regs * regs)
48394 {
48395 +#ifdef CONFIG_GRKERNSEC
48396 + struct file *old_exec_file;
48397 + struct acl_subject_label *old_acl;
48398 + struct rlimit old_rlim[RLIM_NLIMITS];
48399 +#endif
48400 struct linux_binprm *bprm;
48401 struct file *file;
48402 struct files_struct *displaced;
48403 bool clear_in_exec;
48404 int retval;
48405 + const struct cred *cred = current_cred();
48406 +
48407 + /*
48408 + * We move the actual failure in case of RLIMIT_NPROC excess from
48409 + * set*uid() to execve() because too many poorly written programs
48410 + * don't check setuid() return code. Here we additionally recheck
48411 + * whether NPROC limit is still exceeded.
48412 + */
48413 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48414 +
48415 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48416 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48417 + retval = -EAGAIN;
48418 + goto out_ret;
48419 + }
48420 +
48421 + /* We're below the limit (still or again), so we don't want to make
48422 + * further execve() calls fail. */
48423 + current->flags &= ~PF_NPROC_EXCEEDED;
48424
48425 retval = unshare_files(&displaced);
48426 if (retval)
48427 @@ -1493,12 +1539,26 @@ int compat_do_execve(char * filename,
48428 if (IS_ERR(file))
48429 goto out_unmark;
48430
48431 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48432 + retval = -EPERM;
48433 + goto out_file;
48434 + }
48435 +
48436 sched_exec();
48437
48438 bprm->file = file;
48439 bprm->filename = filename;
48440 bprm->interp = filename;
48441
48442 + if (gr_process_user_ban()) {
48443 + retval = -EPERM;
48444 + goto out_file;
48445 + }
48446 +
48447 + retval = -EACCES;
48448 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48449 + goto out_file;
48450 +
48451 retval = bprm_mm_init(bprm);
48452 if (retval)
48453 goto out_file;
48454 @@ -1528,11 +1588,45 @@ int compat_do_execve(char * filename,
48455 if (retval < 0)
48456 goto out;
48457
48458 + if (!gr_tpe_allow(file)) {
48459 + retval = -EACCES;
48460 + goto out;
48461 + }
48462 +
48463 + if (gr_check_crash_exec(file)) {
48464 + retval = -EACCES;
48465 + goto out;
48466 + }
48467 +
48468 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48469 +
48470 + gr_handle_exec_args_compat(bprm, argv);
48471 +
48472 +#ifdef CONFIG_GRKERNSEC
48473 + old_acl = current->acl;
48474 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48475 + old_exec_file = current->exec_file;
48476 + get_file(file);
48477 + current->exec_file = file;
48478 +#endif
48479 +
48480 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48481 + bprm->unsafe);
48482 + if (retval < 0)
48483 + goto out_fail;
48484 +
48485 retval = search_binary_handler(bprm, regs);
48486 if (retval < 0)
48487 - goto out;
48488 + goto out_fail;
48489 +#ifdef CONFIG_GRKERNSEC
48490 + if (old_exec_file)
48491 + fput(old_exec_file);
48492 +#endif
48493
48494 /* execve succeeded */
48495 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48496 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48497 +#endif
48498 current->fs->in_exec = 0;
48499 current->in_execve = 0;
48500 acct_update_integrals(current);
48501 @@ -1541,6 +1635,14 @@ int compat_do_execve(char * filename,
48502 put_files_struct(displaced);
48503 return retval;
48504
48505 +out_fail:
48506 +#ifdef CONFIG_GRKERNSEC
48507 + current->acl = old_acl;
48508 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48509 + fput(current->exec_file);
48510 + current->exec_file = old_exec_file;
48511 +#endif
48512 +
48513 out:
48514 if (bprm->mm) {
48515 acct_arg_size(bprm, 0);
48516 @@ -1711,6 +1813,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48517 struct fdtable *fdt;
48518 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48519
48520 + pax_track_stack();
48521 +
48522 if (n < 0)
48523 goto out_nofds;
48524
48525 @@ -2151,7 +2255,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48526 oldfs = get_fs();
48527 set_fs(KERNEL_DS);
48528 /* The __user pointer casts are valid because of the set_fs() */
48529 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48530 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48531 set_fs(oldfs);
48532
48533 if (err)
48534 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48535 index 0adced2..bbb1b0d 100644
48536 --- a/fs/compat_binfmt_elf.c
48537 +++ b/fs/compat_binfmt_elf.c
48538 @@ -29,10 +29,12 @@
48539 #undef elfhdr
48540 #undef elf_phdr
48541 #undef elf_note
48542 +#undef elf_dyn
48543 #undef elf_addr_t
48544 #define elfhdr elf32_hdr
48545 #define elf_phdr elf32_phdr
48546 #define elf_note elf32_note
48547 +#define elf_dyn Elf32_Dyn
48548 #define elf_addr_t Elf32_Addr
48549
48550 /*
48551 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48552 index d84e705..d8c364c 100644
48553 --- a/fs/compat_ioctl.c
48554 +++ b/fs/compat_ioctl.c
48555 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48556 up = (struct compat_video_spu_palette __user *) arg;
48557 err = get_user(palp, &up->palette);
48558 err |= get_user(length, &up->length);
48559 + if (err)
48560 + return -EFAULT;
48561
48562 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48563 err = put_user(compat_ptr(palp), &up_native->palette);
48564 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48565 return -EFAULT;
48566 if (__get_user(udata, &ss32->iomem_base))
48567 return -EFAULT;
48568 - ss.iomem_base = compat_ptr(udata);
48569 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48570 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48571 __get_user(ss.port_high, &ss32->port_high))
48572 return -EFAULT;
48573 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48574 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48575 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48576 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48577 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48578 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48579 return -EFAULT;
48580
48581 return ioctl_preallocate(file, p);
48582 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48583 index 8e48b52..f01ed91 100644
48584 --- a/fs/configfs/dir.c
48585 +++ b/fs/configfs/dir.c
48586 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48587 }
48588 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48589 struct configfs_dirent *next;
48590 - const char * name;
48591 + const unsigned char * name;
48592 + char d_name[sizeof(next->s_dentry->d_iname)];
48593 int len;
48594
48595 next = list_entry(p, struct configfs_dirent,
48596 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48597 continue;
48598
48599 name = configfs_get_name(next);
48600 - len = strlen(name);
48601 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48602 + len = next->s_dentry->d_name.len;
48603 + memcpy(d_name, name, len);
48604 + name = d_name;
48605 + } else
48606 + len = strlen(name);
48607 if (next->s_dentry)
48608 ino = next->s_dentry->d_inode->i_ino;
48609 else
48610 diff --git a/fs/dcache.c b/fs/dcache.c
48611 index 44c0aea..2529092 100644
48612 --- a/fs/dcache.c
48613 +++ b/fs/dcache.c
48614 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48615
48616 static struct kmem_cache *dentry_cache __read_mostly;
48617
48618 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48619 -
48620 /*
48621 * This is the single most critical data structure when it comes
48622 * to the dcache: the hashtable for lookups. Somebody should try
48623 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48624 mempages -= reserve;
48625
48626 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48627 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48628 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48629
48630 dcache_init();
48631 inode_init();
48632 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48633 index 39c6ee8..dcee0f1 100644
48634 --- a/fs/debugfs/inode.c
48635 +++ b/fs/debugfs/inode.c
48636 @@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48637 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48638 {
48639 return debugfs_create_file(name,
48640 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48641 + S_IFDIR | S_IRWXU,
48642 +#else
48643 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48644 +#endif
48645 parent, NULL, NULL);
48646 }
48647 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48648 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48649 index c010ecf..a8d8c59 100644
48650 --- a/fs/dlm/lockspace.c
48651 +++ b/fs/dlm/lockspace.c
48652 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48653 kfree(ls);
48654 }
48655
48656 -static struct sysfs_ops dlm_attr_ops = {
48657 +static const struct sysfs_ops dlm_attr_ops = {
48658 .show = dlm_attr_show,
48659 .store = dlm_attr_store,
48660 };
48661 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48662 index 7a5f1ac..62fa913 100644
48663 --- a/fs/ecryptfs/crypto.c
48664 +++ b/fs/ecryptfs/crypto.c
48665 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48666 rc);
48667 goto out;
48668 }
48669 - if (unlikely(ecryptfs_verbosity > 0)) {
48670 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48671 - "with iv:\n");
48672 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48673 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48674 - "encryption:\n");
48675 - ecryptfs_dump_hex((char *)
48676 - (page_address(page)
48677 - + (extent_offset * crypt_stat->extent_size)),
48678 - 8);
48679 - }
48680 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48681 page, (extent_offset
48682 * crypt_stat->extent_size),
48683 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48684 goto out;
48685 }
48686 rc = 0;
48687 - if (unlikely(ecryptfs_verbosity > 0)) {
48688 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48689 - "rc = [%d]\n", (extent_base + extent_offset),
48690 - rc);
48691 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48692 - "encryption:\n");
48693 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48694 - }
48695 out:
48696 return rc;
48697 }
48698 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48699 rc);
48700 goto out;
48701 }
48702 - if (unlikely(ecryptfs_verbosity > 0)) {
48703 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48704 - "with iv:\n");
48705 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48706 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48707 - "decryption:\n");
48708 - ecryptfs_dump_hex((char *)
48709 - (page_address(enc_extent_page)
48710 - + (extent_offset * crypt_stat->extent_size)),
48711 - 8);
48712 - }
48713 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48714 (extent_offset
48715 * crypt_stat->extent_size),
48716 @@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48717 goto out;
48718 }
48719 rc = 0;
48720 - if (unlikely(ecryptfs_verbosity > 0)) {
48721 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48722 - "rc = [%d]\n", (extent_base + extent_offset),
48723 - rc);
48724 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48725 - "decryption:\n");
48726 - ecryptfs_dump_hex((char *)(page_address(page)
48727 - + (extent_offset
48728 - * crypt_stat->extent_size)), 8);
48729 - }
48730 out:
48731 return rc;
48732 }
48733 @@ -1455,6 +1415,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
48734 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
48735 }
48736
48737 +void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
48738 +{
48739 + struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
48740 + struct ecryptfs_crypt_stat *crypt_stat;
48741 + u64 file_size;
48742 +
48743 + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
48744 + mount_crypt_stat =
48745 + &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
48746 + if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
48747 + file_size = i_size_read(ecryptfs_inode_to_lower(inode));
48748 + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
48749 + file_size += crypt_stat->num_header_bytes_at_front;
48750 + } else
48751 + file_size = get_unaligned_be64(page_virt);
48752 + i_size_write(inode, (loff_t)file_size);
48753 + crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
48754 +}
48755 +
48756 /**
48757 * ecryptfs_read_headers_virt
48758 * @page_virt: The virtual address into which to read the headers
48759 @@ -1485,6 +1464,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
48760 rc = -EINVAL;
48761 goto out;
48762 }
48763 + if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
48764 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
48765 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
48766 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
48767 &bytes_read);
48768 diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
48769 index 542f625..9685315 100644
48770 --- a/fs/ecryptfs/ecryptfs_kernel.h
48771 +++ b/fs/ecryptfs/ecryptfs_kernel.h
48772 @@ -270,6 +270,7 @@ struct ecryptfs_crypt_stat {
48773 #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
48774 #define ECRYPTFS_ENCFN_USE_FEK 0x00002000
48775 #define ECRYPTFS_UNLINK_SIGS 0x00004000
48776 +#define ECRYPTFS_I_SIZE_INITIALIZED 0x00008000
48777 u32 flags;
48778 unsigned int file_version;
48779 size_t iv_bytes;
48780 @@ -619,6 +620,7 @@ struct ecryptfs_open_req {
48781 int ecryptfs_interpose(struct dentry *hidden_dentry,
48782 struct dentry *this_dentry, struct super_block *sb,
48783 u32 flags);
48784 +void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
48785 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
48786 struct dentry *lower_dentry,
48787 struct inode *ecryptfs_dir_inode,
48788 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
48789 index 3015389..49129f4 100644
48790 --- a/fs/ecryptfs/file.c
48791 +++ b/fs/ecryptfs/file.c
48792 @@ -237,7 +237,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
48793 goto out_free;
48794 }
48795 rc = 0;
48796 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
48797 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
48798 + | ECRYPTFS_ENCRYPTED);
48799 mutex_unlock(&crypt_stat->cs_mutex);
48800 goto out;
48801 }
48802 @@ -347,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
48803 #ifdef CONFIG_COMPAT
48804 .compat_ioctl = ecryptfs_compat_ioctl,
48805 #endif
48806 - .mmap = generic_file_mmap,
48807 .open = ecryptfs_open,
48808 .flush = ecryptfs_flush,
48809 .release = ecryptfs_release,
48810 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48811 index 4434e8f..fa05803 100644
48812 --- a/fs/ecryptfs/inode.c
48813 +++ b/fs/ecryptfs/inode.c
48814 @@ -256,10 +256,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
48815 struct dentry *lower_dir_dentry;
48816 struct vfsmount *lower_mnt;
48817 struct inode *lower_inode;
48818 - struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
48819 struct ecryptfs_crypt_stat *crypt_stat;
48820 char *page_virt = NULL;
48821 - u64 file_size;
48822 int rc = 0;
48823
48824 lower_dir_dentry = lower_dentry->d_parent;
48825 @@ -334,18 +332,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
48826 }
48827 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
48828 }
48829 - mount_crypt_stat = &ecryptfs_superblock_to_private(
48830 - ecryptfs_dentry->d_sb)->mount_crypt_stat;
48831 - if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
48832 - if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
48833 - file_size = (crypt_stat->num_header_bytes_at_front
48834 - + i_size_read(lower_dentry->d_inode));
48835 - else
48836 - file_size = i_size_read(lower_dentry->d_inode);
48837 - } else {
48838 - file_size = get_unaligned_be64(page_virt);
48839 - }
48840 - i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
48841 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
48842 out_free_kmem:
48843 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
48844 goto out;
48845 @@ -660,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48846 old_fs = get_fs();
48847 set_fs(get_ds());
48848 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48849 - (char __user *)lower_buf,
48850 + (char __force_user *)lower_buf,
48851 lower_bufsiz);
48852 set_fs(old_fs);
48853 if (rc < 0)
48854 @@ -706,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48855 }
48856 old_fs = get_fs();
48857 set_fs(get_ds());
48858 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48859 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48860 set_fs(old_fs);
48861 if (rc < 0)
48862 goto out_free;
48863 @@ -964,7 +951,8 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
48864 goto out;
48865 }
48866 rc = 0;
48867 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
48868 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
48869 + | ECRYPTFS_ENCRYPTED);
48870 }
48871 }
48872 mutex_unlock(&crypt_stat->cs_mutex);
48873 diff --git a/fs/exec.c b/fs/exec.c
48874 index 86fafc6..6272c0e 100644
48875 --- a/fs/exec.c
48876 +++ b/fs/exec.c
48877 @@ -56,12 +56,28 @@
48878 #include <linux/fsnotify.h>
48879 #include <linux/fs_struct.h>
48880 #include <linux/pipe_fs_i.h>
48881 +#include <linux/random.h>
48882 +#include <linux/seq_file.h>
48883 +
48884 +#ifdef CONFIG_PAX_REFCOUNT
48885 +#include <linux/kallsyms.h>
48886 +#include <linux/kdebug.h>
48887 +#endif
48888
48889 #include <asm/uaccess.h>
48890 #include <asm/mmu_context.h>
48891 #include <asm/tlb.h>
48892 #include "internal.h"
48893
48894 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48895 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48896 +#endif
48897 +
48898 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48899 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48900 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48901 +#endif
48902 +
48903 int core_uses_pid;
48904 char core_pattern[CORENAME_MAX_SIZE] = "core";
48905 unsigned int core_pipe_limit;
48906 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48907 int write)
48908 {
48909 struct page *page;
48910 - int ret;
48911
48912 -#ifdef CONFIG_STACK_GROWSUP
48913 - if (write) {
48914 - ret = expand_stack_downwards(bprm->vma, pos);
48915 - if (ret < 0)
48916 - return NULL;
48917 - }
48918 -#endif
48919 - ret = get_user_pages(current, bprm->mm, pos,
48920 - 1, write, 1, &page, NULL);
48921 - if (ret <= 0)
48922 + if (0 > expand_stack_downwards(bprm->vma, pos))
48923 + return NULL;
48924 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48925 return NULL;
48926
48927 if (write) {
48928 @@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48929 if (size <= ARG_MAX)
48930 return page;
48931
48932 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48933 + // only allow 1MB for argv+env on suid/sgid binaries
48934 + // to prevent easy ASLR exhaustion
48935 + if (((bprm->cred->euid != current_euid()) ||
48936 + (bprm->cred->egid != current_egid())) &&
48937 + (size > (1024 * 1024))) {
48938 + put_page(page);
48939 + return NULL;
48940 + }
48941 +#endif
48942 +
48943 /*
48944 * Limit to 1/4-th the stack size for the argv+env strings.
48945 * This ensures that:
48946 @@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48947 vma->vm_end = STACK_TOP_MAX;
48948 vma->vm_start = vma->vm_end - PAGE_SIZE;
48949 vma->vm_flags = VM_STACK_FLAGS;
48950 +
48951 +#ifdef CONFIG_PAX_SEGMEXEC
48952 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48953 +#endif
48954 +
48955 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48956
48957 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48958 @@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48959 mm->stack_vm = mm->total_vm = 1;
48960 up_write(&mm->mmap_sem);
48961 bprm->p = vma->vm_end - sizeof(void *);
48962 +
48963 +#ifdef CONFIG_PAX_RANDUSTACK
48964 + if (randomize_va_space)
48965 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48966 +#endif
48967 +
48968 return 0;
48969 err:
48970 up_write(&mm->mmap_sem);
48971 @@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48972 int r;
48973 mm_segment_t oldfs = get_fs();
48974 set_fs(KERNEL_DS);
48975 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48976 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48977 set_fs(oldfs);
48978 return r;
48979 }
48980 @@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48981 unsigned long new_end = old_end - shift;
48982 struct mmu_gather *tlb;
48983
48984 - BUG_ON(new_start > new_end);
48985 + if (new_start >= new_end || new_start < mmap_min_addr)
48986 + return -ENOMEM;
48987
48988 /*
48989 * ensure there are no vmas between where we want to go
48990 @@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48991 if (vma != find_vma(mm, new_start))
48992 return -EFAULT;
48993
48994 +#ifdef CONFIG_PAX_SEGMEXEC
48995 + BUG_ON(pax_find_mirror_vma(vma));
48996 +#endif
48997 +
48998 /*
48999 * cover the whole range: [new_start, old_end)
49000 */
49001 @@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49002 stack_top = arch_align_stack(stack_top);
49003 stack_top = PAGE_ALIGN(stack_top);
49004
49005 - if (unlikely(stack_top < mmap_min_addr) ||
49006 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
49007 - return -ENOMEM;
49008 -
49009 stack_shift = vma->vm_end - stack_top;
49010
49011 bprm->p -= stack_shift;
49012 @@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
49013 bprm->exec -= stack_shift;
49014
49015 down_write(&mm->mmap_sem);
49016 +
49017 + /* Move stack pages down in memory. */
49018 + if (stack_shift) {
49019 + ret = shift_arg_pages(vma, stack_shift);
49020 + if (ret)
49021 + goto out_unlock;
49022 + }
49023 +
49024 vm_flags = VM_STACK_FLAGS;
49025
49026 /*
49027 @@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
49028 vm_flags &= ~VM_EXEC;
49029 vm_flags |= mm->def_flags;
49030
49031 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49032 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49033 + vm_flags &= ~VM_EXEC;
49034 +
49035 +#ifdef CONFIG_PAX_MPROTECT
49036 + if (mm->pax_flags & MF_PAX_MPROTECT)
49037 + vm_flags &= ~VM_MAYEXEC;
49038 +#endif
49039 +
49040 + }
49041 +#endif
49042 +
49043 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
49044 vm_flags);
49045 if (ret)
49046 goto out_unlock;
49047 BUG_ON(prev != vma);
49048
49049 - /* Move stack pages down in memory. */
49050 - if (stack_shift) {
49051 - ret = shift_arg_pages(vma, stack_shift);
49052 - if (ret)
49053 - goto out_unlock;
49054 - }
49055 -
49056 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
49057 stack_size = vma->vm_end - vma->vm_start;
49058 /*
49059 @@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
49060 old_fs = get_fs();
49061 set_fs(get_ds());
49062 /* The cast to a user pointer is valid due to the set_fs() */
49063 - result = vfs_read(file, (void __user *)addr, count, &pos);
49064 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
49065 set_fs(old_fs);
49066 return result;
49067 }
49068 @@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
49069 perf_event_comm(tsk);
49070 }
49071
49072 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
49073 +{
49074 + int i, ch;
49075 +
49076 + /* Copies the binary name from after last slash */
49077 + for (i = 0; (ch = *(fn++)) != '\0';) {
49078 + if (ch == '/')
49079 + i = 0; /* overwrite what we wrote */
49080 + else
49081 + if (i < len - 1)
49082 + tcomm[i++] = ch;
49083 + }
49084 + tcomm[i] = '\0';
49085 +}
49086 +
49087 int flush_old_exec(struct linux_binprm * bprm)
49088 {
49089 int retval;
49090 @@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
49091
49092 set_mm_exe_file(bprm->mm, bprm->file);
49093
49094 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
49095 /*
49096 * Release all of the old mmap stuff
49097 */
49098 @@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
49099
49100 void setup_new_exec(struct linux_binprm * bprm)
49101 {
49102 - int i, ch;
49103 - char * name;
49104 - char tcomm[sizeof(current->comm)];
49105 -
49106 arch_pick_mmap_layout(current->mm);
49107
49108 /* This is the point of no return */
49109 @@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
49110 else
49111 set_dumpable(current->mm, suid_dumpable);
49112
49113 - name = bprm->filename;
49114 -
49115 - /* Copies the binary name from after last slash */
49116 - for (i=0; (ch = *(name++)) != '\0';) {
49117 - if (ch == '/')
49118 - i = 0; /* overwrite what we wrote */
49119 - else
49120 - if (i < (sizeof(tcomm) - 1))
49121 - tcomm[i++] = ch;
49122 - }
49123 - tcomm[i] = '\0';
49124 - set_task_comm(current, tcomm);
49125 + set_task_comm(current, bprm->tcomm);
49126
49127 /* Set the new mm task size. We have to do that late because it may
49128 * depend on TIF_32BIT which is only updated in flush_thread() on
49129 @@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
49130 }
49131 rcu_read_unlock();
49132
49133 - if (p->fs->users > n_fs) {
49134 + if (atomic_read(&p->fs->users) > n_fs) {
49135 bprm->unsafe |= LSM_UNSAFE_SHARE;
49136 } else {
49137 res = -EAGAIN;
49138 @@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
49139
49140 EXPORT_SYMBOL(search_binary_handler);
49141
49142 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49143 +atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
49144 +#endif
49145 +
49146 /*
49147 * sys_execve() executes a new program.
49148 */
49149 @@ -1347,11 +1396,35 @@ int do_execve(char * filename,
49150 char __user *__user *envp,
49151 struct pt_regs * regs)
49152 {
49153 +#ifdef CONFIG_GRKERNSEC
49154 + struct file *old_exec_file;
49155 + struct acl_subject_label *old_acl;
49156 + struct rlimit old_rlim[RLIM_NLIMITS];
49157 +#endif
49158 struct linux_binprm *bprm;
49159 struct file *file;
49160 struct files_struct *displaced;
49161 bool clear_in_exec;
49162 int retval;
49163 + const struct cred *cred = current_cred();
49164 +
49165 + /*
49166 + * We move the actual failure in case of RLIMIT_NPROC excess from
49167 + * set*uid() to execve() because too many poorly written programs
49168 + * don't check setuid() return code. Here we additionally recheck
49169 + * whether NPROC limit is still exceeded.
49170 + */
49171 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
49172 +
49173 + if ((current->flags & PF_NPROC_EXCEEDED) &&
49174 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
49175 + retval = -EAGAIN;
49176 + goto out_ret;
49177 + }
49178 +
49179 + /* We're below the limit (still or again), so we don't want to make
49180 + * further execve() calls fail. */
49181 + current->flags &= ~PF_NPROC_EXCEEDED;
49182
49183 retval = unshare_files(&displaced);
49184 if (retval)
49185 @@ -1377,12 +1450,27 @@ int do_execve(char * filename,
49186 if (IS_ERR(file))
49187 goto out_unmark;
49188
49189 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
49190 + retval = -EPERM;
49191 + goto out_file;
49192 + }
49193 +
49194 sched_exec();
49195
49196 bprm->file = file;
49197 bprm->filename = filename;
49198 bprm->interp = filename;
49199
49200 + if (gr_process_user_ban()) {
49201 + retval = -EPERM;
49202 + goto out_file;
49203 + }
49204 +
49205 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49206 + retval = -EACCES;
49207 + goto out_file;
49208 + }
49209 +
49210 retval = bprm_mm_init(bprm);
49211 if (retval)
49212 goto out_file;
49213 @@ -1412,12 +1500,47 @@ int do_execve(char * filename,
49214 if (retval < 0)
49215 goto out;
49216
49217 + if (!gr_tpe_allow(file)) {
49218 + retval = -EACCES;
49219 + goto out;
49220 + }
49221 +
49222 + if (gr_check_crash_exec(file)) {
49223 + retval = -EACCES;
49224 + goto out;
49225 + }
49226 +
49227 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49228 +
49229 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49230 +
49231 +#ifdef CONFIG_GRKERNSEC
49232 + old_acl = current->acl;
49233 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49234 + old_exec_file = current->exec_file;
49235 + get_file(file);
49236 + current->exec_file = file;
49237 +#endif
49238 +
49239 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49240 + bprm->unsafe);
49241 + if (retval < 0)
49242 + goto out_fail;
49243 +
49244 current->flags &= ~PF_KTHREAD;
49245 retval = search_binary_handler(bprm,regs);
49246 if (retval < 0)
49247 - goto out;
49248 + goto out_fail;
49249 +#ifdef CONFIG_GRKERNSEC
49250 + if (old_exec_file)
49251 + fput(old_exec_file);
49252 +#endif
49253
49254 /* execve succeeded */
49255 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49256 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
49257 +#endif
49258 +
49259 current->fs->in_exec = 0;
49260 current->in_execve = 0;
49261 acct_update_integrals(current);
49262 @@ -1426,6 +1549,14 @@ int do_execve(char * filename,
49263 put_files_struct(displaced);
49264 return retval;
49265
49266 +out_fail:
49267 +#ifdef CONFIG_GRKERNSEC
49268 + current->acl = old_acl;
49269 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49270 + fput(current->exec_file);
49271 + current->exec_file = old_exec_file;
49272 +#endif
49273 +
49274 out:
49275 if (bprm->mm) {
49276 acct_arg_size(bprm, 0);
49277 @@ -1591,6 +1722,220 @@ out:
49278 return ispipe;
49279 }
49280
49281 +int pax_check_flags(unsigned long *flags)
49282 +{
49283 + int retval = 0;
49284 +
49285 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49286 + if (*flags & MF_PAX_SEGMEXEC)
49287 + {
49288 + *flags &= ~MF_PAX_SEGMEXEC;
49289 + retval = -EINVAL;
49290 + }
49291 +#endif
49292 +
49293 + if ((*flags & MF_PAX_PAGEEXEC)
49294 +
49295 +#ifdef CONFIG_PAX_PAGEEXEC
49296 + && (*flags & MF_PAX_SEGMEXEC)
49297 +#endif
49298 +
49299 + )
49300 + {
49301 + *flags &= ~MF_PAX_PAGEEXEC;
49302 + retval = -EINVAL;
49303 + }
49304 +
49305 + if ((*flags & MF_PAX_MPROTECT)
49306 +
49307 +#ifdef CONFIG_PAX_MPROTECT
49308 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49309 +#endif
49310 +
49311 + )
49312 + {
49313 + *flags &= ~MF_PAX_MPROTECT;
49314 + retval = -EINVAL;
49315 + }
49316 +
49317 + if ((*flags & MF_PAX_EMUTRAMP)
49318 +
49319 +#ifdef CONFIG_PAX_EMUTRAMP
49320 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49321 +#endif
49322 +
49323 + )
49324 + {
49325 + *flags &= ~MF_PAX_EMUTRAMP;
49326 + retval = -EINVAL;
49327 + }
49328 +
49329 + return retval;
49330 +}
49331 +
49332 +EXPORT_SYMBOL(pax_check_flags);
49333 +
49334 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49335 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49336 +{
49337 + struct task_struct *tsk = current;
49338 + struct mm_struct *mm = current->mm;
49339 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49340 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49341 + char *path_exec = NULL;
49342 + char *path_fault = NULL;
49343 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
49344 +
49345 + if (buffer_exec && buffer_fault) {
49346 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49347 +
49348 + down_read(&mm->mmap_sem);
49349 + vma = mm->mmap;
49350 + while (vma && (!vma_exec || !vma_fault)) {
49351 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49352 + vma_exec = vma;
49353 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49354 + vma_fault = vma;
49355 + vma = vma->vm_next;
49356 + }
49357 + if (vma_exec) {
49358 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49359 + if (IS_ERR(path_exec))
49360 + path_exec = "<path too long>";
49361 + else {
49362 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49363 + if (path_exec) {
49364 + *path_exec = 0;
49365 + path_exec = buffer_exec;
49366 + } else
49367 + path_exec = "<path too long>";
49368 + }
49369 + }
49370 + if (vma_fault) {
49371 + start = vma_fault->vm_start;
49372 + end = vma_fault->vm_end;
49373 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49374 + if (vma_fault->vm_file) {
49375 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49376 + if (IS_ERR(path_fault))
49377 + path_fault = "<path too long>";
49378 + else {
49379 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49380 + if (path_fault) {
49381 + *path_fault = 0;
49382 + path_fault = buffer_fault;
49383 + } else
49384 + path_fault = "<path too long>";
49385 + }
49386 + } else
49387 + path_fault = "<anonymous mapping>";
49388 + }
49389 + up_read(&mm->mmap_sem);
49390 + }
49391 + if (tsk->signal->curr_ip)
49392 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49393 + else
49394 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49395 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49396 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49397 + task_uid(tsk), task_euid(tsk), pc, sp);
49398 + free_page((unsigned long)buffer_exec);
49399 + free_page((unsigned long)buffer_fault);
49400 + pax_report_insns(regs, pc, sp);
49401 + do_coredump(SIGKILL, SIGKILL, regs);
49402 +}
49403 +#endif
49404 +
49405 +#ifdef CONFIG_PAX_REFCOUNT
49406 +void pax_report_refcount_overflow(struct pt_regs *regs)
49407 +{
49408 + if (current->signal->curr_ip)
49409 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49410 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49411 + else
49412 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49413 + current->comm, task_pid_nr(current), current_uid(), current_euid());
49414 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49415 + show_regs(regs);
49416 + force_sig_specific(SIGKILL, current);
49417 +}
49418 +#endif
49419 +
49420 +#ifdef CONFIG_PAX_USERCOPY
49421 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49422 +int object_is_on_stack(const void *obj, unsigned long len)
49423 +{
49424 + const void * const stack = task_stack_page(current);
49425 + const void * const stackend = stack + THREAD_SIZE;
49426 +
49427 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49428 + const void *frame = NULL;
49429 + const void *oldframe;
49430 +#endif
49431 +
49432 + if (obj + len < obj)
49433 + return -1;
49434 +
49435 + if (obj + len <= stack || stackend <= obj)
49436 + return 0;
49437 +
49438 + if (obj < stack || stackend < obj + len)
49439 + return -1;
49440 +
49441 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49442 + oldframe = __builtin_frame_address(1);
49443 + if (oldframe)
49444 + frame = __builtin_frame_address(2);
49445 + /*
49446 + low ----------------------------------------------> high
49447 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
49448 + ^----------------^
49449 + allow copies only within here
49450 + */
49451 + while (stack <= frame && frame < stackend) {
49452 + /* if obj + len extends past the last frame, this
49453 + check won't pass and the next frame will be 0,
49454 + causing us to bail out and correctly report
49455 + the copy as invalid
49456 + */
49457 + if (obj + len <= frame)
49458 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49459 + oldframe = frame;
49460 + frame = *(const void * const *)frame;
49461 + }
49462 + return -1;
49463 +#else
49464 + return 1;
49465 +#endif
49466 +}
49467 +
49468 +
49469 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49470 +{
49471 + if (current->signal->curr_ip)
49472 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49473 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49474 + else
49475 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49476 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49477 +
49478 + dump_stack();
49479 + gr_handle_kernel_exploit();
49480 + do_group_exit(SIGKILL);
49481 +}
49482 +#endif
49483 +
49484 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49485 +void pax_track_stack(void)
49486 +{
49487 + unsigned long sp = (unsigned long)&sp;
49488 + if (sp < current_thread_info()->lowest_stack &&
49489 + sp > (unsigned long)task_stack_page(current))
49490 + current_thread_info()->lowest_stack = sp;
49491 +}
49492 +EXPORT_SYMBOL(pax_track_stack);
49493 +#endif
49494 +
49495 static int zap_process(struct task_struct *start)
49496 {
49497 struct task_struct *t;
49498 @@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49499 pipe = file->f_path.dentry->d_inode->i_pipe;
49500
49501 pipe_lock(pipe);
49502 - pipe->readers++;
49503 - pipe->writers--;
49504 + atomic_inc(&pipe->readers);
49505 + atomic_dec(&pipe->writers);
49506
49507 - while ((pipe->readers > 1) && (!signal_pending(current))) {
49508 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49509 wake_up_interruptible_sync(&pipe->wait);
49510 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49511 pipe_wait(pipe);
49512 }
49513
49514 - pipe->readers--;
49515 - pipe->writers++;
49516 + atomic_dec(&pipe->readers);
49517 + atomic_inc(&pipe->writers);
49518 pipe_unlock(pipe);
49519
49520 }
49521 @@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49522 char **helper_argv = NULL;
49523 int helper_argc = 0;
49524 int dump_count = 0;
49525 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49526 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49527
49528 audit_core_dumps(signr);
49529
49530 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49531 + gr_handle_brute_attach(current, mm->flags);
49532 +
49533 binfmt = mm->binfmt;
49534 if (!binfmt || !binfmt->core_dump)
49535 goto fail;
49536 @@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49537 */
49538 clear_thread_flag(TIF_SIGPENDING);
49539
49540 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49541 +
49542 /*
49543 * lock_kernel() because format_corename() is controlled by sysctl, which
49544 * uses lock_kernel()
49545 @@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49546 goto fail_unlock;
49547 }
49548
49549 - dump_count = atomic_inc_return(&core_dump_count);
49550 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49551 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49552 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49553 task_tgid_vnr(current), current->comm);
49554 @@ -1972,7 +2322,7 @@ close_fail:
49555 filp_close(file, NULL);
49556 fail_dropcount:
49557 if (dump_count)
49558 - atomic_dec(&core_dump_count);
49559 + atomic_dec_unchecked(&core_dump_count);
49560 fail_unlock:
49561 if (helper_argv)
49562 argv_free(helper_argv);
49563 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49564 index 7f8d2e5..a1abdbb 100644
49565 --- a/fs/ext2/balloc.c
49566 +++ b/fs/ext2/balloc.c
49567 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49568
49569 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49570 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49571 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49572 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49573 sbi->s_resuid != current_fsuid() &&
49574 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49575 return 0;
49576 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49577 index 27967f9..9f2a5fb 100644
49578 --- a/fs/ext3/balloc.c
49579 +++ b/fs/ext3/balloc.c
49580 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49581
49582 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49583 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49584 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49585 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49586 sbi->s_resuid != current_fsuid() &&
49587 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49588 return 0;
49589 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49590 index e85b63c..80398e6 100644
49591 --- a/fs/ext4/balloc.c
49592 +++ b/fs/ext4/balloc.c
49593 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49594 /* Hm, nope. Are (enough) root reserved blocks available? */
49595 if (sbi->s_resuid == current_fsuid() ||
49596 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49597 - capable(CAP_SYS_RESOURCE)) {
49598 + capable_nolog(CAP_SYS_RESOURCE)) {
49599 if (free_blocks >= (nblocks + dirty_blocks))
49600 return 1;
49601 }
49602 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49603 index 67c46ed..1f237e5 100644
49604 --- a/fs/ext4/ext4.h
49605 +++ b/fs/ext4/ext4.h
49606 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49607
49608 /* stats for buddy allocator */
49609 spinlock_t s_mb_pa_lock;
49610 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49611 - atomic_t s_bal_success; /* we found long enough chunks */
49612 - atomic_t s_bal_allocated; /* in blocks */
49613 - atomic_t s_bal_ex_scanned; /* total extents scanned */
49614 - atomic_t s_bal_goals; /* goal hits */
49615 - atomic_t s_bal_breaks; /* too long searches */
49616 - atomic_t s_bal_2orders; /* 2^order hits */
49617 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49618 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49619 + atomic_unchecked_t s_bal_allocated; /* in blocks */
49620 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49621 + atomic_unchecked_t s_bal_goals; /* goal hits */
49622 + atomic_unchecked_t s_bal_breaks; /* too long searches */
49623 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49624 spinlock_t s_bal_lock;
49625 unsigned long s_mb_buddies_generated;
49626 unsigned long long s_mb_generation_time;
49627 - atomic_t s_mb_lost_chunks;
49628 - atomic_t s_mb_preallocated;
49629 - atomic_t s_mb_discarded;
49630 + atomic_unchecked_t s_mb_lost_chunks;
49631 + atomic_unchecked_t s_mb_preallocated;
49632 + atomic_unchecked_t s_mb_discarded;
49633 atomic_t s_lock_busy;
49634
49635 /* locality groups */
49636 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49637 index 2a60541..7439d61 100644
49638 --- a/fs/ext4/file.c
49639 +++ b/fs/ext4/file.c
49640 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49641 cp = d_path(&path, buf, sizeof(buf));
49642 path_put(&path);
49643 if (!IS_ERR(cp)) {
49644 - memcpy(sbi->s_es->s_last_mounted, cp,
49645 - sizeof(sbi->s_es->s_last_mounted));
49646 + strlcpy(sbi->s_es->s_last_mounted, cp,
49647 + sizeof(sbi->s_es->s_last_mounted));
49648 sb->s_dirt = 1;
49649 }
49650 }
49651 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49652 index 42bac1b..0aab9d8 100644
49653 --- a/fs/ext4/mballoc.c
49654 +++ b/fs/ext4/mballoc.c
49655 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49656 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49657
49658 if (EXT4_SB(sb)->s_mb_stats)
49659 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49660 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49661
49662 break;
49663 }
49664 @@ -2131,7 +2131,7 @@ repeat:
49665 ac->ac_status = AC_STATUS_CONTINUE;
49666 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49667 cr = 3;
49668 - atomic_inc(&sbi->s_mb_lost_chunks);
49669 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49670 goto repeat;
49671 }
49672 }
49673 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49674 ext4_grpblk_t counters[16];
49675 } sg;
49676
49677 + pax_track_stack();
49678 +
49679 group--;
49680 if (group == 0)
49681 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49682 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49683 if (sbi->s_mb_stats) {
49684 printk(KERN_INFO
49685 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49686 - atomic_read(&sbi->s_bal_allocated),
49687 - atomic_read(&sbi->s_bal_reqs),
49688 - atomic_read(&sbi->s_bal_success));
49689 + atomic_read_unchecked(&sbi->s_bal_allocated),
49690 + atomic_read_unchecked(&sbi->s_bal_reqs),
49691 + atomic_read_unchecked(&sbi->s_bal_success));
49692 printk(KERN_INFO
49693 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49694 "%u 2^N hits, %u breaks, %u lost\n",
49695 - atomic_read(&sbi->s_bal_ex_scanned),
49696 - atomic_read(&sbi->s_bal_goals),
49697 - atomic_read(&sbi->s_bal_2orders),
49698 - atomic_read(&sbi->s_bal_breaks),
49699 - atomic_read(&sbi->s_mb_lost_chunks));
49700 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49701 + atomic_read_unchecked(&sbi->s_bal_goals),
49702 + atomic_read_unchecked(&sbi->s_bal_2orders),
49703 + atomic_read_unchecked(&sbi->s_bal_breaks),
49704 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49705 printk(KERN_INFO
49706 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49707 sbi->s_mb_buddies_generated++,
49708 sbi->s_mb_generation_time);
49709 printk(KERN_INFO
49710 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49711 - atomic_read(&sbi->s_mb_preallocated),
49712 - atomic_read(&sbi->s_mb_discarded));
49713 + atomic_read_unchecked(&sbi->s_mb_preallocated),
49714 + atomic_read_unchecked(&sbi->s_mb_discarded));
49715 }
49716
49717 free_percpu(sbi->s_locality_groups);
49718 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49719 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49720
49721 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49722 - atomic_inc(&sbi->s_bal_reqs);
49723 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49724 + atomic_inc_unchecked(&sbi->s_bal_reqs);
49725 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49726 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49727 - atomic_inc(&sbi->s_bal_success);
49728 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49729 + atomic_inc_unchecked(&sbi->s_bal_success);
49730 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49731 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49732 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49733 - atomic_inc(&sbi->s_bal_goals);
49734 + atomic_inc_unchecked(&sbi->s_bal_goals);
49735 if (ac->ac_found > sbi->s_mb_max_to_scan)
49736 - atomic_inc(&sbi->s_bal_breaks);
49737 + atomic_inc_unchecked(&sbi->s_bal_breaks);
49738 }
49739
49740 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49741 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49742 trace_ext4_mb_new_inode_pa(ac, pa);
49743
49744 ext4_mb_use_inode_pa(ac, pa);
49745 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49746 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49747
49748 ei = EXT4_I(ac->ac_inode);
49749 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49750 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49751 trace_ext4_mb_new_group_pa(ac, pa);
49752
49753 ext4_mb_use_group_pa(ac, pa);
49754 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49755 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49756
49757 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49758 lg = ac->ac_lg;
49759 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49760 * from the bitmap and continue.
49761 */
49762 }
49763 - atomic_add(free, &sbi->s_mb_discarded);
49764 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
49765
49766 return err;
49767 }
49768 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49769 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49770 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49771 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49772 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49773 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49774
49775 if (ac) {
49776 ac->ac_sb = sb;
49777 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49778 index f1e7077..edd86b2 100644
49779 --- a/fs/ext4/super.c
49780 +++ b/fs/ext4/super.c
49781 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49782 }
49783
49784
49785 -static struct sysfs_ops ext4_attr_ops = {
49786 +static const struct sysfs_ops ext4_attr_ops = {
49787 .show = ext4_attr_show,
49788 .store = ext4_attr_store,
49789 };
49790 diff --git a/fs/fcntl.c b/fs/fcntl.c
49791 index 97e01dc..e9aab2d 100644
49792 --- a/fs/fcntl.c
49793 +++ b/fs/fcntl.c
49794 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49795 if (err)
49796 return err;
49797
49798 + if (gr_handle_chroot_fowner(pid, type))
49799 + return -ENOENT;
49800 + if (gr_check_protected_task_fowner(pid, type))
49801 + return -EACCES;
49802 +
49803 f_modown(filp, pid, type, force);
49804 return 0;
49805 }
49806 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49807
49808 static int f_setown_ex(struct file *filp, unsigned long arg)
49809 {
49810 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49811 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49812 struct f_owner_ex owner;
49813 struct pid *pid;
49814 int type;
49815 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49816
49817 static int f_getown_ex(struct file *filp, unsigned long arg)
49818 {
49819 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49820 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49821 struct f_owner_ex owner;
49822 int ret = 0;
49823
49824 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49825 switch (cmd) {
49826 case F_DUPFD:
49827 case F_DUPFD_CLOEXEC:
49828 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49829 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49830 break;
49831 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49832 diff --git a/fs/fifo.c b/fs/fifo.c
49833 index f8f97b8..b1f2259 100644
49834 --- a/fs/fifo.c
49835 +++ b/fs/fifo.c
49836 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49837 */
49838 filp->f_op = &read_pipefifo_fops;
49839 pipe->r_counter++;
49840 - if (pipe->readers++ == 0)
49841 + if (atomic_inc_return(&pipe->readers) == 1)
49842 wake_up_partner(inode);
49843
49844 - if (!pipe->writers) {
49845 + if (!atomic_read(&pipe->writers)) {
49846 if ((filp->f_flags & O_NONBLOCK)) {
49847 /* suppress POLLHUP until we have
49848 * seen a writer */
49849 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49850 * errno=ENXIO when there is no process reading the FIFO.
49851 */
49852 ret = -ENXIO;
49853 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49854 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49855 goto err;
49856
49857 filp->f_op = &write_pipefifo_fops;
49858 pipe->w_counter++;
49859 - if (!pipe->writers++)
49860 + if (atomic_inc_return(&pipe->writers) == 1)
49861 wake_up_partner(inode);
49862
49863 - if (!pipe->readers) {
49864 + if (!atomic_read(&pipe->readers)) {
49865 wait_for_partner(inode, &pipe->r_counter);
49866 if (signal_pending(current))
49867 goto err_wr;
49868 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49869 */
49870 filp->f_op = &rdwr_pipefifo_fops;
49871
49872 - pipe->readers++;
49873 - pipe->writers++;
49874 + atomic_inc(&pipe->readers);
49875 + atomic_inc(&pipe->writers);
49876 pipe->r_counter++;
49877 pipe->w_counter++;
49878 - if (pipe->readers == 1 || pipe->writers == 1)
49879 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49880 wake_up_partner(inode);
49881 break;
49882
49883 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49884 return 0;
49885
49886 err_rd:
49887 - if (!--pipe->readers)
49888 + if (atomic_dec_and_test(&pipe->readers))
49889 wake_up_interruptible(&pipe->wait);
49890 ret = -ERESTARTSYS;
49891 goto err;
49892
49893 err_wr:
49894 - if (!--pipe->writers)
49895 + if (atomic_dec_and_test(&pipe->writers))
49896 wake_up_interruptible(&pipe->wait);
49897 ret = -ERESTARTSYS;
49898 goto err;
49899
49900 err:
49901 - if (!pipe->readers && !pipe->writers)
49902 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49903 free_pipe_info(inode);
49904
49905 err_nocleanup:
49906 diff --git a/fs/file.c b/fs/file.c
49907 index 87e1290..a930cc4 100644
49908 --- a/fs/file.c
49909 +++ b/fs/file.c
49910 @@ -14,6 +14,7 @@
49911 #include <linux/slab.h>
49912 #include <linux/vmalloc.h>
49913 #include <linux/file.h>
49914 +#include <linux/security.h>
49915 #include <linux/fdtable.h>
49916 #include <linux/bitops.h>
49917 #include <linux/interrupt.h>
49918 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49919 * N.B. For clone tasks sharing a files structure, this test
49920 * will limit the total number of files that can be opened.
49921 */
49922 +
49923 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49924 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49925 return -EMFILE;
49926
49927 diff --git a/fs/filesystems.c b/fs/filesystems.c
49928 index a24c58e..53f91ee 100644
49929 --- a/fs/filesystems.c
49930 +++ b/fs/filesystems.c
49931 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49932 int len = dot ? dot - name : strlen(name);
49933
49934 fs = __get_fs_type(name, len);
49935 +
49936 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49937 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49938 +#else
49939 if (!fs && (request_module("%.*s", len, name) == 0))
49940 +#endif
49941 fs = __get_fs_type(name, len);
49942
49943 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49944 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49945 index eee0590..1181166 100644
49946 --- a/fs/fs_struct.c
49947 +++ b/fs/fs_struct.c
49948 @@ -4,6 +4,7 @@
49949 #include <linux/path.h>
49950 #include <linux/slab.h>
49951 #include <linux/fs_struct.h>
49952 +#include <linux/grsecurity.h>
49953
49954 /*
49955 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49956 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49957 old_root = fs->root;
49958 fs->root = *path;
49959 path_get(path);
49960 + gr_set_chroot_entries(current, path);
49961 write_unlock(&fs->lock);
49962 if (old_root.dentry)
49963 path_put(&old_root);
49964 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49965 && fs->root.mnt == old_root->mnt) {
49966 path_get(new_root);
49967 fs->root = *new_root;
49968 + gr_set_chroot_entries(p, new_root);
49969 count++;
49970 }
49971 if (fs->pwd.dentry == old_root->dentry
49972 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49973 task_lock(tsk);
49974 write_lock(&fs->lock);
49975 tsk->fs = NULL;
49976 - kill = !--fs->users;
49977 + gr_clear_chroot_entries(tsk);
49978 + kill = !atomic_dec_return(&fs->users);
49979 write_unlock(&fs->lock);
49980 task_unlock(tsk);
49981 if (kill)
49982 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49983 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49984 /* We don't need to lock fs - think why ;-) */
49985 if (fs) {
49986 - fs->users = 1;
49987 + atomic_set(&fs->users, 1);
49988 fs->in_exec = 0;
49989 rwlock_init(&fs->lock);
49990 fs->umask = old->umask;
49991 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49992
49993 task_lock(current);
49994 write_lock(&fs->lock);
49995 - kill = !--fs->users;
49996 + kill = !atomic_dec_return(&fs->users);
49997 current->fs = new_fs;
49998 + gr_set_chroot_entries(current, &new_fs->root);
49999 write_unlock(&fs->lock);
50000 task_unlock(current);
50001
50002 @@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
50003
50004 int current_umask(void)
50005 {
50006 - return current->fs->umask;
50007 + return current->fs->umask | gr_acl_umask();
50008 }
50009 EXPORT_SYMBOL(current_umask);
50010
50011 /* to be mentioned only in INIT_TASK */
50012 struct fs_struct init_fs = {
50013 - .users = 1,
50014 + .users = ATOMIC_INIT(1),
50015 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
50016 .umask = 0022,
50017 };
50018 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
50019 task_lock(current);
50020
50021 write_lock(&init_fs.lock);
50022 - init_fs.users++;
50023 + atomic_inc(&init_fs.users);
50024 write_unlock(&init_fs.lock);
50025
50026 write_lock(&fs->lock);
50027 current->fs = &init_fs;
50028 - kill = !--fs->users;
50029 + gr_set_chroot_entries(current, &current->fs->root);
50030 + kill = !atomic_dec_return(&fs->users);
50031 write_unlock(&fs->lock);
50032
50033 task_unlock(current);
50034 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
50035 index 9905350..02eaec4 100644
50036 --- a/fs/fscache/cookie.c
50037 +++ b/fs/fscache/cookie.c
50038 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
50039 parent ? (char *) parent->def->name : "<no-parent>",
50040 def->name, netfs_data);
50041
50042 - fscache_stat(&fscache_n_acquires);
50043 + fscache_stat_unchecked(&fscache_n_acquires);
50044
50045 /* if there's no parent cookie, then we don't create one here either */
50046 if (!parent) {
50047 - fscache_stat(&fscache_n_acquires_null);
50048 + fscache_stat_unchecked(&fscache_n_acquires_null);
50049 _leave(" [no parent]");
50050 return NULL;
50051 }
50052 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
50053 /* allocate and initialise a cookie */
50054 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
50055 if (!cookie) {
50056 - fscache_stat(&fscache_n_acquires_oom);
50057 + fscache_stat_unchecked(&fscache_n_acquires_oom);
50058 _leave(" [ENOMEM]");
50059 return NULL;
50060 }
50061 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50062
50063 switch (cookie->def->type) {
50064 case FSCACHE_COOKIE_TYPE_INDEX:
50065 - fscache_stat(&fscache_n_cookie_index);
50066 + fscache_stat_unchecked(&fscache_n_cookie_index);
50067 break;
50068 case FSCACHE_COOKIE_TYPE_DATAFILE:
50069 - fscache_stat(&fscache_n_cookie_data);
50070 + fscache_stat_unchecked(&fscache_n_cookie_data);
50071 break;
50072 default:
50073 - fscache_stat(&fscache_n_cookie_special);
50074 + fscache_stat_unchecked(&fscache_n_cookie_special);
50075 break;
50076 }
50077
50078 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50079 if (fscache_acquire_non_index_cookie(cookie) < 0) {
50080 atomic_dec(&parent->n_children);
50081 __fscache_cookie_put(cookie);
50082 - fscache_stat(&fscache_n_acquires_nobufs);
50083 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
50084 _leave(" = NULL");
50085 return NULL;
50086 }
50087 }
50088
50089 - fscache_stat(&fscache_n_acquires_ok);
50090 + fscache_stat_unchecked(&fscache_n_acquires_ok);
50091 _leave(" = %p", cookie);
50092 return cookie;
50093 }
50094 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
50095 cache = fscache_select_cache_for_object(cookie->parent);
50096 if (!cache) {
50097 up_read(&fscache_addremove_sem);
50098 - fscache_stat(&fscache_n_acquires_no_cache);
50099 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
50100 _leave(" = -ENOMEDIUM [no cache]");
50101 return -ENOMEDIUM;
50102 }
50103 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
50104 object = cache->ops->alloc_object(cache, cookie);
50105 fscache_stat_d(&fscache_n_cop_alloc_object);
50106 if (IS_ERR(object)) {
50107 - fscache_stat(&fscache_n_object_no_alloc);
50108 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
50109 ret = PTR_ERR(object);
50110 goto error;
50111 }
50112
50113 - fscache_stat(&fscache_n_object_alloc);
50114 + fscache_stat_unchecked(&fscache_n_object_alloc);
50115
50116 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
50117
50118 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
50119 struct fscache_object *object;
50120 struct hlist_node *_p;
50121
50122 - fscache_stat(&fscache_n_updates);
50123 + fscache_stat_unchecked(&fscache_n_updates);
50124
50125 if (!cookie) {
50126 - fscache_stat(&fscache_n_updates_null);
50127 + fscache_stat_unchecked(&fscache_n_updates_null);
50128 _leave(" [no cookie]");
50129 return;
50130 }
50131 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50132 struct fscache_object *object;
50133 unsigned long event;
50134
50135 - fscache_stat(&fscache_n_relinquishes);
50136 + fscache_stat_unchecked(&fscache_n_relinquishes);
50137 if (retire)
50138 - fscache_stat(&fscache_n_relinquishes_retire);
50139 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
50140
50141 if (!cookie) {
50142 - fscache_stat(&fscache_n_relinquishes_null);
50143 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
50144 _leave(" [no cookie]");
50145 return;
50146 }
50147 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50148
50149 /* wait for the cookie to finish being instantiated (or to fail) */
50150 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
50151 - fscache_stat(&fscache_n_relinquishes_waitcrt);
50152 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
50153 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
50154 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
50155 }
50156 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
50157 index edd7434..0725e66 100644
50158 --- a/fs/fscache/internal.h
50159 +++ b/fs/fscache/internal.h
50160 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
50161 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50162 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50163
50164 -extern atomic_t fscache_n_op_pend;
50165 -extern atomic_t fscache_n_op_run;
50166 -extern atomic_t fscache_n_op_enqueue;
50167 -extern atomic_t fscache_n_op_deferred_release;
50168 -extern atomic_t fscache_n_op_release;
50169 -extern atomic_t fscache_n_op_gc;
50170 -extern atomic_t fscache_n_op_cancelled;
50171 -extern atomic_t fscache_n_op_rejected;
50172 +extern atomic_unchecked_t fscache_n_op_pend;
50173 +extern atomic_unchecked_t fscache_n_op_run;
50174 +extern atomic_unchecked_t fscache_n_op_enqueue;
50175 +extern atomic_unchecked_t fscache_n_op_deferred_release;
50176 +extern atomic_unchecked_t fscache_n_op_release;
50177 +extern atomic_unchecked_t fscache_n_op_gc;
50178 +extern atomic_unchecked_t fscache_n_op_cancelled;
50179 +extern atomic_unchecked_t fscache_n_op_rejected;
50180
50181 -extern atomic_t fscache_n_attr_changed;
50182 -extern atomic_t fscache_n_attr_changed_ok;
50183 -extern atomic_t fscache_n_attr_changed_nobufs;
50184 -extern atomic_t fscache_n_attr_changed_nomem;
50185 -extern atomic_t fscache_n_attr_changed_calls;
50186 +extern atomic_unchecked_t fscache_n_attr_changed;
50187 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
50188 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50189 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50190 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
50191
50192 -extern atomic_t fscache_n_allocs;
50193 -extern atomic_t fscache_n_allocs_ok;
50194 -extern atomic_t fscache_n_allocs_wait;
50195 -extern atomic_t fscache_n_allocs_nobufs;
50196 -extern atomic_t fscache_n_allocs_intr;
50197 -extern atomic_t fscache_n_allocs_object_dead;
50198 -extern atomic_t fscache_n_alloc_ops;
50199 -extern atomic_t fscache_n_alloc_op_waits;
50200 +extern atomic_unchecked_t fscache_n_allocs;
50201 +extern atomic_unchecked_t fscache_n_allocs_ok;
50202 +extern atomic_unchecked_t fscache_n_allocs_wait;
50203 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
50204 +extern atomic_unchecked_t fscache_n_allocs_intr;
50205 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
50206 +extern atomic_unchecked_t fscache_n_alloc_ops;
50207 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
50208
50209 -extern atomic_t fscache_n_retrievals;
50210 -extern atomic_t fscache_n_retrievals_ok;
50211 -extern atomic_t fscache_n_retrievals_wait;
50212 -extern atomic_t fscache_n_retrievals_nodata;
50213 -extern atomic_t fscache_n_retrievals_nobufs;
50214 -extern atomic_t fscache_n_retrievals_intr;
50215 -extern atomic_t fscache_n_retrievals_nomem;
50216 -extern atomic_t fscache_n_retrievals_object_dead;
50217 -extern atomic_t fscache_n_retrieval_ops;
50218 -extern atomic_t fscache_n_retrieval_op_waits;
50219 +extern atomic_unchecked_t fscache_n_retrievals;
50220 +extern atomic_unchecked_t fscache_n_retrievals_ok;
50221 +extern atomic_unchecked_t fscache_n_retrievals_wait;
50222 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
50223 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50224 +extern atomic_unchecked_t fscache_n_retrievals_intr;
50225 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
50226 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50227 +extern atomic_unchecked_t fscache_n_retrieval_ops;
50228 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50229
50230 -extern atomic_t fscache_n_stores;
50231 -extern atomic_t fscache_n_stores_ok;
50232 -extern atomic_t fscache_n_stores_again;
50233 -extern atomic_t fscache_n_stores_nobufs;
50234 -extern atomic_t fscache_n_stores_oom;
50235 -extern atomic_t fscache_n_store_ops;
50236 -extern atomic_t fscache_n_store_calls;
50237 -extern atomic_t fscache_n_store_pages;
50238 -extern atomic_t fscache_n_store_radix_deletes;
50239 -extern atomic_t fscache_n_store_pages_over_limit;
50240 +extern atomic_unchecked_t fscache_n_stores;
50241 +extern atomic_unchecked_t fscache_n_stores_ok;
50242 +extern atomic_unchecked_t fscache_n_stores_again;
50243 +extern atomic_unchecked_t fscache_n_stores_nobufs;
50244 +extern atomic_unchecked_t fscache_n_stores_oom;
50245 +extern atomic_unchecked_t fscache_n_store_ops;
50246 +extern atomic_unchecked_t fscache_n_store_calls;
50247 +extern atomic_unchecked_t fscache_n_store_pages;
50248 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
50249 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50250
50251 -extern atomic_t fscache_n_store_vmscan_not_storing;
50252 -extern atomic_t fscache_n_store_vmscan_gone;
50253 -extern atomic_t fscache_n_store_vmscan_busy;
50254 -extern atomic_t fscache_n_store_vmscan_cancelled;
50255 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50256 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50257 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50258 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50259
50260 -extern atomic_t fscache_n_marks;
50261 -extern atomic_t fscache_n_uncaches;
50262 +extern atomic_unchecked_t fscache_n_marks;
50263 +extern atomic_unchecked_t fscache_n_uncaches;
50264
50265 -extern atomic_t fscache_n_acquires;
50266 -extern atomic_t fscache_n_acquires_null;
50267 -extern atomic_t fscache_n_acquires_no_cache;
50268 -extern atomic_t fscache_n_acquires_ok;
50269 -extern atomic_t fscache_n_acquires_nobufs;
50270 -extern atomic_t fscache_n_acquires_oom;
50271 +extern atomic_unchecked_t fscache_n_acquires;
50272 +extern atomic_unchecked_t fscache_n_acquires_null;
50273 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
50274 +extern atomic_unchecked_t fscache_n_acquires_ok;
50275 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
50276 +extern atomic_unchecked_t fscache_n_acquires_oom;
50277
50278 -extern atomic_t fscache_n_updates;
50279 -extern atomic_t fscache_n_updates_null;
50280 -extern atomic_t fscache_n_updates_run;
50281 +extern atomic_unchecked_t fscache_n_updates;
50282 +extern atomic_unchecked_t fscache_n_updates_null;
50283 +extern atomic_unchecked_t fscache_n_updates_run;
50284
50285 -extern atomic_t fscache_n_relinquishes;
50286 -extern atomic_t fscache_n_relinquishes_null;
50287 -extern atomic_t fscache_n_relinquishes_waitcrt;
50288 -extern atomic_t fscache_n_relinquishes_retire;
50289 +extern atomic_unchecked_t fscache_n_relinquishes;
50290 +extern atomic_unchecked_t fscache_n_relinquishes_null;
50291 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50292 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
50293
50294 -extern atomic_t fscache_n_cookie_index;
50295 -extern atomic_t fscache_n_cookie_data;
50296 -extern atomic_t fscache_n_cookie_special;
50297 +extern atomic_unchecked_t fscache_n_cookie_index;
50298 +extern atomic_unchecked_t fscache_n_cookie_data;
50299 +extern atomic_unchecked_t fscache_n_cookie_special;
50300
50301 -extern atomic_t fscache_n_object_alloc;
50302 -extern atomic_t fscache_n_object_no_alloc;
50303 -extern atomic_t fscache_n_object_lookups;
50304 -extern atomic_t fscache_n_object_lookups_negative;
50305 -extern atomic_t fscache_n_object_lookups_positive;
50306 -extern atomic_t fscache_n_object_lookups_timed_out;
50307 -extern atomic_t fscache_n_object_created;
50308 -extern atomic_t fscache_n_object_avail;
50309 -extern atomic_t fscache_n_object_dead;
50310 +extern atomic_unchecked_t fscache_n_object_alloc;
50311 +extern atomic_unchecked_t fscache_n_object_no_alloc;
50312 +extern atomic_unchecked_t fscache_n_object_lookups;
50313 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
50314 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
50315 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50316 +extern atomic_unchecked_t fscache_n_object_created;
50317 +extern atomic_unchecked_t fscache_n_object_avail;
50318 +extern atomic_unchecked_t fscache_n_object_dead;
50319
50320 -extern atomic_t fscache_n_checkaux_none;
50321 -extern atomic_t fscache_n_checkaux_okay;
50322 -extern atomic_t fscache_n_checkaux_update;
50323 -extern atomic_t fscache_n_checkaux_obsolete;
50324 +extern atomic_unchecked_t fscache_n_checkaux_none;
50325 +extern atomic_unchecked_t fscache_n_checkaux_okay;
50326 +extern atomic_unchecked_t fscache_n_checkaux_update;
50327 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50328
50329 extern atomic_t fscache_n_cop_alloc_object;
50330 extern atomic_t fscache_n_cop_lookup_object;
50331 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50332 atomic_inc(stat);
50333 }
50334
50335 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50336 +{
50337 + atomic_inc_unchecked(stat);
50338 +}
50339 +
50340 static inline void fscache_stat_d(atomic_t *stat)
50341 {
50342 atomic_dec(stat);
50343 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50344
50345 #define __fscache_stat(stat) (NULL)
50346 #define fscache_stat(stat) do {} while (0)
50347 +#define fscache_stat_unchecked(stat) do {} while (0)
50348 #define fscache_stat_d(stat) do {} while (0)
50349 #endif
50350
50351 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50352 index e513ac5..e888d34 100644
50353 --- a/fs/fscache/object.c
50354 +++ b/fs/fscache/object.c
50355 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50356 /* update the object metadata on disk */
50357 case FSCACHE_OBJECT_UPDATING:
50358 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50359 - fscache_stat(&fscache_n_updates_run);
50360 + fscache_stat_unchecked(&fscache_n_updates_run);
50361 fscache_stat(&fscache_n_cop_update_object);
50362 object->cache->ops->update_object(object);
50363 fscache_stat_d(&fscache_n_cop_update_object);
50364 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50365 spin_lock(&object->lock);
50366 object->state = FSCACHE_OBJECT_DEAD;
50367 spin_unlock(&object->lock);
50368 - fscache_stat(&fscache_n_object_dead);
50369 + fscache_stat_unchecked(&fscache_n_object_dead);
50370 goto terminal_transit;
50371
50372 /* handle the parent cache of this object being withdrawn from
50373 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50374 spin_lock(&object->lock);
50375 object->state = FSCACHE_OBJECT_DEAD;
50376 spin_unlock(&object->lock);
50377 - fscache_stat(&fscache_n_object_dead);
50378 + fscache_stat_unchecked(&fscache_n_object_dead);
50379 goto terminal_transit;
50380
50381 /* complain about the object being woken up once it is
50382 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50383 parent->cookie->def->name, cookie->def->name,
50384 object->cache->tag->name);
50385
50386 - fscache_stat(&fscache_n_object_lookups);
50387 + fscache_stat_unchecked(&fscache_n_object_lookups);
50388 fscache_stat(&fscache_n_cop_lookup_object);
50389 ret = object->cache->ops->lookup_object(object);
50390 fscache_stat_d(&fscache_n_cop_lookup_object);
50391 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50392 if (ret == -ETIMEDOUT) {
50393 /* probably stuck behind another object, so move this one to
50394 * the back of the queue */
50395 - fscache_stat(&fscache_n_object_lookups_timed_out);
50396 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50397 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50398 }
50399
50400 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50401
50402 spin_lock(&object->lock);
50403 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50404 - fscache_stat(&fscache_n_object_lookups_negative);
50405 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50406
50407 /* transit here to allow write requests to begin stacking up
50408 * and read requests to begin returning ENODATA */
50409 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50410 * result, in which case there may be data available */
50411 spin_lock(&object->lock);
50412 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50413 - fscache_stat(&fscache_n_object_lookups_positive);
50414 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50415
50416 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50417
50418 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50419 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50420 } else {
50421 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50422 - fscache_stat(&fscache_n_object_created);
50423 + fscache_stat_unchecked(&fscache_n_object_created);
50424
50425 object->state = FSCACHE_OBJECT_AVAILABLE;
50426 spin_unlock(&object->lock);
50427 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50428 fscache_enqueue_dependents(object);
50429
50430 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50431 - fscache_stat(&fscache_n_object_avail);
50432 + fscache_stat_unchecked(&fscache_n_object_avail);
50433
50434 _leave("");
50435 }
50436 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50437 enum fscache_checkaux result;
50438
50439 if (!object->cookie->def->check_aux) {
50440 - fscache_stat(&fscache_n_checkaux_none);
50441 + fscache_stat_unchecked(&fscache_n_checkaux_none);
50442 return FSCACHE_CHECKAUX_OKAY;
50443 }
50444
50445 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50446 switch (result) {
50447 /* entry okay as is */
50448 case FSCACHE_CHECKAUX_OKAY:
50449 - fscache_stat(&fscache_n_checkaux_okay);
50450 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
50451 break;
50452
50453 /* entry requires update */
50454 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50455 - fscache_stat(&fscache_n_checkaux_update);
50456 + fscache_stat_unchecked(&fscache_n_checkaux_update);
50457 break;
50458
50459 /* entry requires deletion */
50460 case FSCACHE_CHECKAUX_OBSOLETE:
50461 - fscache_stat(&fscache_n_checkaux_obsolete);
50462 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50463 break;
50464
50465 default:
50466 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50467 index 313e79a..775240f 100644
50468 --- a/fs/fscache/operation.c
50469 +++ b/fs/fscache/operation.c
50470 @@ -16,7 +16,7 @@
50471 #include <linux/seq_file.h>
50472 #include "internal.h"
50473
50474 -atomic_t fscache_op_debug_id;
50475 +atomic_unchecked_t fscache_op_debug_id;
50476 EXPORT_SYMBOL(fscache_op_debug_id);
50477
50478 /**
50479 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50480 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50481 ASSERTCMP(atomic_read(&op->usage), >, 0);
50482
50483 - fscache_stat(&fscache_n_op_enqueue);
50484 + fscache_stat_unchecked(&fscache_n_op_enqueue);
50485 switch (op->flags & FSCACHE_OP_TYPE) {
50486 case FSCACHE_OP_FAST:
50487 _debug("queue fast");
50488 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50489 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50490 if (op->processor)
50491 fscache_enqueue_operation(op);
50492 - fscache_stat(&fscache_n_op_run);
50493 + fscache_stat_unchecked(&fscache_n_op_run);
50494 }
50495
50496 /*
50497 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50498 if (object->n_ops > 0) {
50499 atomic_inc(&op->usage);
50500 list_add_tail(&op->pend_link, &object->pending_ops);
50501 - fscache_stat(&fscache_n_op_pend);
50502 + fscache_stat_unchecked(&fscache_n_op_pend);
50503 } else if (!list_empty(&object->pending_ops)) {
50504 atomic_inc(&op->usage);
50505 list_add_tail(&op->pend_link, &object->pending_ops);
50506 - fscache_stat(&fscache_n_op_pend);
50507 + fscache_stat_unchecked(&fscache_n_op_pend);
50508 fscache_start_operations(object);
50509 } else {
50510 ASSERTCMP(object->n_in_progress, ==, 0);
50511 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50512 object->n_exclusive++; /* reads and writes must wait */
50513 atomic_inc(&op->usage);
50514 list_add_tail(&op->pend_link, &object->pending_ops);
50515 - fscache_stat(&fscache_n_op_pend);
50516 + fscache_stat_unchecked(&fscache_n_op_pend);
50517 ret = 0;
50518 } else {
50519 /* not allowed to submit ops in any other state */
50520 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50521 if (object->n_exclusive > 0) {
50522 atomic_inc(&op->usage);
50523 list_add_tail(&op->pend_link, &object->pending_ops);
50524 - fscache_stat(&fscache_n_op_pend);
50525 + fscache_stat_unchecked(&fscache_n_op_pend);
50526 } else if (!list_empty(&object->pending_ops)) {
50527 atomic_inc(&op->usage);
50528 list_add_tail(&op->pend_link, &object->pending_ops);
50529 - fscache_stat(&fscache_n_op_pend);
50530 + fscache_stat_unchecked(&fscache_n_op_pend);
50531 fscache_start_operations(object);
50532 } else {
50533 ASSERTCMP(object->n_exclusive, ==, 0);
50534 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50535 object->n_ops++;
50536 atomic_inc(&op->usage);
50537 list_add_tail(&op->pend_link, &object->pending_ops);
50538 - fscache_stat(&fscache_n_op_pend);
50539 + fscache_stat_unchecked(&fscache_n_op_pend);
50540 ret = 0;
50541 } else if (object->state == FSCACHE_OBJECT_DYING ||
50542 object->state == FSCACHE_OBJECT_LC_DYING ||
50543 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50544 - fscache_stat(&fscache_n_op_rejected);
50545 + fscache_stat_unchecked(&fscache_n_op_rejected);
50546 ret = -ENOBUFS;
50547 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50548 fscache_report_unexpected_submission(object, op, ostate);
50549 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50550
50551 ret = -EBUSY;
50552 if (!list_empty(&op->pend_link)) {
50553 - fscache_stat(&fscache_n_op_cancelled);
50554 + fscache_stat_unchecked(&fscache_n_op_cancelled);
50555 list_del_init(&op->pend_link);
50556 object->n_ops--;
50557 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50558 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50559 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50560 BUG();
50561
50562 - fscache_stat(&fscache_n_op_release);
50563 + fscache_stat_unchecked(&fscache_n_op_release);
50564
50565 if (op->release) {
50566 op->release(op);
50567 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50568 * lock, and defer it otherwise */
50569 if (!spin_trylock(&object->lock)) {
50570 _debug("defer put");
50571 - fscache_stat(&fscache_n_op_deferred_release);
50572 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
50573
50574 cache = object->cache;
50575 spin_lock(&cache->op_gc_list_lock);
50576 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50577
50578 _debug("GC DEFERRED REL OBJ%x OP%x",
50579 object->debug_id, op->debug_id);
50580 - fscache_stat(&fscache_n_op_gc);
50581 + fscache_stat_unchecked(&fscache_n_op_gc);
50582
50583 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50584
50585 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50586 index c598ea4..6aac13e 100644
50587 --- a/fs/fscache/page.c
50588 +++ b/fs/fscache/page.c
50589 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50590 val = radix_tree_lookup(&cookie->stores, page->index);
50591 if (!val) {
50592 rcu_read_unlock();
50593 - fscache_stat(&fscache_n_store_vmscan_not_storing);
50594 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50595 __fscache_uncache_page(cookie, page);
50596 return true;
50597 }
50598 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50599 spin_unlock(&cookie->stores_lock);
50600
50601 if (xpage) {
50602 - fscache_stat(&fscache_n_store_vmscan_cancelled);
50603 - fscache_stat(&fscache_n_store_radix_deletes);
50604 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50605 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50606 ASSERTCMP(xpage, ==, page);
50607 } else {
50608 - fscache_stat(&fscache_n_store_vmscan_gone);
50609 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50610 }
50611
50612 wake_up_bit(&cookie->flags, 0);
50613 @@ -106,7 +106,7 @@ page_busy:
50614 /* we might want to wait here, but that could deadlock the allocator as
50615 * the slow-work threads writing to the cache may all end up sleeping
50616 * on memory allocation */
50617 - fscache_stat(&fscache_n_store_vmscan_busy);
50618 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50619 return false;
50620 }
50621 EXPORT_SYMBOL(__fscache_maybe_release_page);
50622 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50623 FSCACHE_COOKIE_STORING_TAG);
50624 if (!radix_tree_tag_get(&cookie->stores, page->index,
50625 FSCACHE_COOKIE_PENDING_TAG)) {
50626 - fscache_stat(&fscache_n_store_radix_deletes);
50627 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50628 xpage = radix_tree_delete(&cookie->stores, page->index);
50629 }
50630 spin_unlock(&cookie->stores_lock);
50631 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50632
50633 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50634
50635 - fscache_stat(&fscache_n_attr_changed_calls);
50636 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50637
50638 if (fscache_object_is_active(object)) {
50639 fscache_set_op_state(op, "CallFS");
50640 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50641
50642 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50643
50644 - fscache_stat(&fscache_n_attr_changed);
50645 + fscache_stat_unchecked(&fscache_n_attr_changed);
50646
50647 op = kzalloc(sizeof(*op), GFP_KERNEL);
50648 if (!op) {
50649 - fscache_stat(&fscache_n_attr_changed_nomem);
50650 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50651 _leave(" = -ENOMEM");
50652 return -ENOMEM;
50653 }
50654 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50655 if (fscache_submit_exclusive_op(object, op) < 0)
50656 goto nobufs;
50657 spin_unlock(&cookie->lock);
50658 - fscache_stat(&fscache_n_attr_changed_ok);
50659 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50660 fscache_put_operation(op);
50661 _leave(" = 0");
50662 return 0;
50663 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50664 nobufs:
50665 spin_unlock(&cookie->lock);
50666 kfree(op);
50667 - fscache_stat(&fscache_n_attr_changed_nobufs);
50668 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50669 _leave(" = %d", -ENOBUFS);
50670 return -ENOBUFS;
50671 }
50672 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50673 /* allocate a retrieval operation and attempt to submit it */
50674 op = kzalloc(sizeof(*op), GFP_NOIO);
50675 if (!op) {
50676 - fscache_stat(&fscache_n_retrievals_nomem);
50677 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50678 return NULL;
50679 }
50680
50681 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50682 return 0;
50683 }
50684
50685 - fscache_stat(&fscache_n_retrievals_wait);
50686 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
50687
50688 jif = jiffies;
50689 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50690 fscache_wait_bit_interruptible,
50691 TASK_INTERRUPTIBLE) != 0) {
50692 - fscache_stat(&fscache_n_retrievals_intr);
50693 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50694 _leave(" = -ERESTARTSYS");
50695 return -ERESTARTSYS;
50696 }
50697 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50698 */
50699 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50700 struct fscache_retrieval *op,
50701 - atomic_t *stat_op_waits,
50702 - atomic_t *stat_object_dead)
50703 + atomic_unchecked_t *stat_op_waits,
50704 + atomic_unchecked_t *stat_object_dead)
50705 {
50706 int ret;
50707
50708 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50709 goto check_if_dead;
50710
50711 _debug(">>> WT");
50712 - fscache_stat(stat_op_waits);
50713 + fscache_stat_unchecked(stat_op_waits);
50714 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50715 fscache_wait_bit_interruptible,
50716 TASK_INTERRUPTIBLE) < 0) {
50717 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50718
50719 check_if_dead:
50720 if (unlikely(fscache_object_is_dead(object))) {
50721 - fscache_stat(stat_object_dead);
50722 + fscache_stat_unchecked(stat_object_dead);
50723 return -ENOBUFS;
50724 }
50725 return 0;
50726 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50727
50728 _enter("%p,%p,,,", cookie, page);
50729
50730 - fscache_stat(&fscache_n_retrievals);
50731 + fscache_stat_unchecked(&fscache_n_retrievals);
50732
50733 if (hlist_empty(&cookie->backing_objects))
50734 goto nobufs;
50735 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50736 goto nobufs_unlock;
50737 spin_unlock(&cookie->lock);
50738
50739 - fscache_stat(&fscache_n_retrieval_ops);
50740 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50741
50742 /* pin the netfs read context in case we need to do the actual netfs
50743 * read because we've encountered a cache read failure */
50744 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50745
50746 error:
50747 if (ret == -ENOMEM)
50748 - fscache_stat(&fscache_n_retrievals_nomem);
50749 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50750 else if (ret == -ERESTARTSYS)
50751 - fscache_stat(&fscache_n_retrievals_intr);
50752 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50753 else if (ret == -ENODATA)
50754 - fscache_stat(&fscache_n_retrievals_nodata);
50755 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50756 else if (ret < 0)
50757 - fscache_stat(&fscache_n_retrievals_nobufs);
50758 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50759 else
50760 - fscache_stat(&fscache_n_retrievals_ok);
50761 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50762
50763 fscache_put_retrieval(op);
50764 _leave(" = %d", ret);
50765 @@ -453,7 +453,7 @@ nobufs_unlock:
50766 spin_unlock(&cookie->lock);
50767 kfree(op);
50768 nobufs:
50769 - fscache_stat(&fscache_n_retrievals_nobufs);
50770 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50771 _leave(" = -ENOBUFS");
50772 return -ENOBUFS;
50773 }
50774 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50775
50776 _enter("%p,,%d,,,", cookie, *nr_pages);
50777
50778 - fscache_stat(&fscache_n_retrievals);
50779 + fscache_stat_unchecked(&fscache_n_retrievals);
50780
50781 if (hlist_empty(&cookie->backing_objects))
50782 goto nobufs;
50783 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50784 goto nobufs_unlock;
50785 spin_unlock(&cookie->lock);
50786
50787 - fscache_stat(&fscache_n_retrieval_ops);
50788 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50789
50790 /* pin the netfs read context in case we need to do the actual netfs
50791 * read because we've encountered a cache read failure */
50792 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50793
50794 error:
50795 if (ret == -ENOMEM)
50796 - fscache_stat(&fscache_n_retrievals_nomem);
50797 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50798 else if (ret == -ERESTARTSYS)
50799 - fscache_stat(&fscache_n_retrievals_intr);
50800 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50801 else if (ret == -ENODATA)
50802 - fscache_stat(&fscache_n_retrievals_nodata);
50803 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50804 else if (ret < 0)
50805 - fscache_stat(&fscache_n_retrievals_nobufs);
50806 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50807 else
50808 - fscache_stat(&fscache_n_retrievals_ok);
50809 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50810
50811 fscache_put_retrieval(op);
50812 _leave(" = %d", ret);
50813 @@ -570,7 +570,7 @@ nobufs_unlock:
50814 spin_unlock(&cookie->lock);
50815 kfree(op);
50816 nobufs:
50817 - fscache_stat(&fscache_n_retrievals_nobufs);
50818 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50819 _leave(" = -ENOBUFS");
50820 return -ENOBUFS;
50821 }
50822 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50823
50824 _enter("%p,%p,,,", cookie, page);
50825
50826 - fscache_stat(&fscache_n_allocs);
50827 + fscache_stat_unchecked(&fscache_n_allocs);
50828
50829 if (hlist_empty(&cookie->backing_objects))
50830 goto nobufs;
50831 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50832 goto nobufs_unlock;
50833 spin_unlock(&cookie->lock);
50834
50835 - fscache_stat(&fscache_n_alloc_ops);
50836 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50837
50838 ret = fscache_wait_for_retrieval_activation(
50839 object, op,
50840 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50841
50842 error:
50843 if (ret == -ERESTARTSYS)
50844 - fscache_stat(&fscache_n_allocs_intr);
50845 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50846 else if (ret < 0)
50847 - fscache_stat(&fscache_n_allocs_nobufs);
50848 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50849 else
50850 - fscache_stat(&fscache_n_allocs_ok);
50851 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50852
50853 fscache_put_retrieval(op);
50854 _leave(" = %d", ret);
50855 @@ -651,7 +651,7 @@ nobufs_unlock:
50856 spin_unlock(&cookie->lock);
50857 kfree(op);
50858 nobufs:
50859 - fscache_stat(&fscache_n_allocs_nobufs);
50860 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50861 _leave(" = -ENOBUFS");
50862 return -ENOBUFS;
50863 }
50864 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50865
50866 spin_lock(&cookie->stores_lock);
50867
50868 - fscache_stat(&fscache_n_store_calls);
50869 + fscache_stat_unchecked(&fscache_n_store_calls);
50870
50871 /* find a page to store */
50872 page = NULL;
50873 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50874 page = results[0];
50875 _debug("gang %d [%lx]", n, page->index);
50876 if (page->index > op->store_limit) {
50877 - fscache_stat(&fscache_n_store_pages_over_limit);
50878 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50879 goto superseded;
50880 }
50881
50882 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50883
50884 if (page) {
50885 fscache_set_op_state(&op->op, "Store");
50886 - fscache_stat(&fscache_n_store_pages);
50887 + fscache_stat_unchecked(&fscache_n_store_pages);
50888 fscache_stat(&fscache_n_cop_write_page);
50889 ret = object->cache->ops->write_page(op, page);
50890 fscache_stat_d(&fscache_n_cop_write_page);
50891 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50892 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50893 ASSERT(PageFsCache(page));
50894
50895 - fscache_stat(&fscache_n_stores);
50896 + fscache_stat_unchecked(&fscache_n_stores);
50897
50898 op = kzalloc(sizeof(*op), GFP_NOIO);
50899 if (!op)
50900 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50901 spin_unlock(&cookie->stores_lock);
50902 spin_unlock(&object->lock);
50903
50904 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50905 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50906 op->store_limit = object->store_limit;
50907
50908 if (fscache_submit_op(object, &op->op) < 0)
50909 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50910
50911 spin_unlock(&cookie->lock);
50912 radix_tree_preload_end();
50913 - fscache_stat(&fscache_n_store_ops);
50914 - fscache_stat(&fscache_n_stores_ok);
50915 + fscache_stat_unchecked(&fscache_n_store_ops);
50916 + fscache_stat_unchecked(&fscache_n_stores_ok);
50917
50918 /* the slow work queue now carries its own ref on the object */
50919 fscache_put_operation(&op->op);
50920 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50921 return 0;
50922
50923 already_queued:
50924 - fscache_stat(&fscache_n_stores_again);
50925 + fscache_stat_unchecked(&fscache_n_stores_again);
50926 already_pending:
50927 spin_unlock(&cookie->stores_lock);
50928 spin_unlock(&object->lock);
50929 spin_unlock(&cookie->lock);
50930 radix_tree_preload_end();
50931 kfree(op);
50932 - fscache_stat(&fscache_n_stores_ok);
50933 + fscache_stat_unchecked(&fscache_n_stores_ok);
50934 _leave(" = 0");
50935 return 0;
50936
50937 @@ -886,14 +886,14 @@ nobufs:
50938 spin_unlock(&cookie->lock);
50939 radix_tree_preload_end();
50940 kfree(op);
50941 - fscache_stat(&fscache_n_stores_nobufs);
50942 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50943 _leave(" = -ENOBUFS");
50944 return -ENOBUFS;
50945
50946 nomem_free:
50947 kfree(op);
50948 nomem:
50949 - fscache_stat(&fscache_n_stores_oom);
50950 + fscache_stat_unchecked(&fscache_n_stores_oom);
50951 _leave(" = -ENOMEM");
50952 return -ENOMEM;
50953 }
50954 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50955 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50956 ASSERTCMP(page, !=, NULL);
50957
50958 - fscache_stat(&fscache_n_uncaches);
50959 + fscache_stat_unchecked(&fscache_n_uncaches);
50960
50961 /* cache withdrawal may beat us to it */
50962 if (!PageFsCache(page))
50963 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50964 unsigned long loop;
50965
50966 #ifdef CONFIG_FSCACHE_STATS
50967 - atomic_add(pagevec->nr, &fscache_n_marks);
50968 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50969 #endif
50970
50971 for (loop = 0; loop < pagevec->nr; loop++) {
50972 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50973 index 46435f3..8cddf18 100644
50974 --- a/fs/fscache/stats.c
50975 +++ b/fs/fscache/stats.c
50976 @@ -18,95 +18,95 @@
50977 /*
50978 * operation counters
50979 */
50980 -atomic_t fscache_n_op_pend;
50981 -atomic_t fscache_n_op_run;
50982 -atomic_t fscache_n_op_enqueue;
50983 -atomic_t fscache_n_op_requeue;
50984 -atomic_t fscache_n_op_deferred_release;
50985 -atomic_t fscache_n_op_release;
50986 -atomic_t fscache_n_op_gc;
50987 -atomic_t fscache_n_op_cancelled;
50988 -atomic_t fscache_n_op_rejected;
50989 +atomic_unchecked_t fscache_n_op_pend;
50990 +atomic_unchecked_t fscache_n_op_run;
50991 +atomic_unchecked_t fscache_n_op_enqueue;
50992 +atomic_unchecked_t fscache_n_op_requeue;
50993 +atomic_unchecked_t fscache_n_op_deferred_release;
50994 +atomic_unchecked_t fscache_n_op_release;
50995 +atomic_unchecked_t fscache_n_op_gc;
50996 +atomic_unchecked_t fscache_n_op_cancelled;
50997 +atomic_unchecked_t fscache_n_op_rejected;
50998
50999 -atomic_t fscache_n_attr_changed;
51000 -atomic_t fscache_n_attr_changed_ok;
51001 -atomic_t fscache_n_attr_changed_nobufs;
51002 -atomic_t fscache_n_attr_changed_nomem;
51003 -atomic_t fscache_n_attr_changed_calls;
51004 +atomic_unchecked_t fscache_n_attr_changed;
51005 +atomic_unchecked_t fscache_n_attr_changed_ok;
51006 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
51007 +atomic_unchecked_t fscache_n_attr_changed_nomem;
51008 +atomic_unchecked_t fscache_n_attr_changed_calls;
51009
51010 -atomic_t fscache_n_allocs;
51011 -atomic_t fscache_n_allocs_ok;
51012 -atomic_t fscache_n_allocs_wait;
51013 -atomic_t fscache_n_allocs_nobufs;
51014 -atomic_t fscache_n_allocs_intr;
51015 -atomic_t fscache_n_allocs_object_dead;
51016 -atomic_t fscache_n_alloc_ops;
51017 -atomic_t fscache_n_alloc_op_waits;
51018 +atomic_unchecked_t fscache_n_allocs;
51019 +atomic_unchecked_t fscache_n_allocs_ok;
51020 +atomic_unchecked_t fscache_n_allocs_wait;
51021 +atomic_unchecked_t fscache_n_allocs_nobufs;
51022 +atomic_unchecked_t fscache_n_allocs_intr;
51023 +atomic_unchecked_t fscache_n_allocs_object_dead;
51024 +atomic_unchecked_t fscache_n_alloc_ops;
51025 +atomic_unchecked_t fscache_n_alloc_op_waits;
51026
51027 -atomic_t fscache_n_retrievals;
51028 -atomic_t fscache_n_retrievals_ok;
51029 -atomic_t fscache_n_retrievals_wait;
51030 -atomic_t fscache_n_retrievals_nodata;
51031 -atomic_t fscache_n_retrievals_nobufs;
51032 -atomic_t fscache_n_retrievals_intr;
51033 -atomic_t fscache_n_retrievals_nomem;
51034 -atomic_t fscache_n_retrievals_object_dead;
51035 -atomic_t fscache_n_retrieval_ops;
51036 -atomic_t fscache_n_retrieval_op_waits;
51037 +atomic_unchecked_t fscache_n_retrievals;
51038 +atomic_unchecked_t fscache_n_retrievals_ok;
51039 +atomic_unchecked_t fscache_n_retrievals_wait;
51040 +atomic_unchecked_t fscache_n_retrievals_nodata;
51041 +atomic_unchecked_t fscache_n_retrievals_nobufs;
51042 +atomic_unchecked_t fscache_n_retrievals_intr;
51043 +atomic_unchecked_t fscache_n_retrievals_nomem;
51044 +atomic_unchecked_t fscache_n_retrievals_object_dead;
51045 +atomic_unchecked_t fscache_n_retrieval_ops;
51046 +atomic_unchecked_t fscache_n_retrieval_op_waits;
51047
51048 -atomic_t fscache_n_stores;
51049 -atomic_t fscache_n_stores_ok;
51050 -atomic_t fscache_n_stores_again;
51051 -atomic_t fscache_n_stores_nobufs;
51052 -atomic_t fscache_n_stores_oom;
51053 -atomic_t fscache_n_store_ops;
51054 -atomic_t fscache_n_store_calls;
51055 -atomic_t fscache_n_store_pages;
51056 -atomic_t fscache_n_store_radix_deletes;
51057 -atomic_t fscache_n_store_pages_over_limit;
51058 +atomic_unchecked_t fscache_n_stores;
51059 +atomic_unchecked_t fscache_n_stores_ok;
51060 +atomic_unchecked_t fscache_n_stores_again;
51061 +atomic_unchecked_t fscache_n_stores_nobufs;
51062 +atomic_unchecked_t fscache_n_stores_oom;
51063 +atomic_unchecked_t fscache_n_store_ops;
51064 +atomic_unchecked_t fscache_n_store_calls;
51065 +atomic_unchecked_t fscache_n_store_pages;
51066 +atomic_unchecked_t fscache_n_store_radix_deletes;
51067 +atomic_unchecked_t fscache_n_store_pages_over_limit;
51068
51069 -atomic_t fscache_n_store_vmscan_not_storing;
51070 -atomic_t fscache_n_store_vmscan_gone;
51071 -atomic_t fscache_n_store_vmscan_busy;
51072 -atomic_t fscache_n_store_vmscan_cancelled;
51073 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51074 +atomic_unchecked_t fscache_n_store_vmscan_gone;
51075 +atomic_unchecked_t fscache_n_store_vmscan_busy;
51076 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51077
51078 -atomic_t fscache_n_marks;
51079 -atomic_t fscache_n_uncaches;
51080 +atomic_unchecked_t fscache_n_marks;
51081 +atomic_unchecked_t fscache_n_uncaches;
51082
51083 -atomic_t fscache_n_acquires;
51084 -atomic_t fscache_n_acquires_null;
51085 -atomic_t fscache_n_acquires_no_cache;
51086 -atomic_t fscache_n_acquires_ok;
51087 -atomic_t fscache_n_acquires_nobufs;
51088 -atomic_t fscache_n_acquires_oom;
51089 +atomic_unchecked_t fscache_n_acquires;
51090 +atomic_unchecked_t fscache_n_acquires_null;
51091 +atomic_unchecked_t fscache_n_acquires_no_cache;
51092 +atomic_unchecked_t fscache_n_acquires_ok;
51093 +atomic_unchecked_t fscache_n_acquires_nobufs;
51094 +atomic_unchecked_t fscache_n_acquires_oom;
51095
51096 -atomic_t fscache_n_updates;
51097 -atomic_t fscache_n_updates_null;
51098 -atomic_t fscache_n_updates_run;
51099 +atomic_unchecked_t fscache_n_updates;
51100 +atomic_unchecked_t fscache_n_updates_null;
51101 +atomic_unchecked_t fscache_n_updates_run;
51102
51103 -atomic_t fscache_n_relinquishes;
51104 -atomic_t fscache_n_relinquishes_null;
51105 -atomic_t fscache_n_relinquishes_waitcrt;
51106 -atomic_t fscache_n_relinquishes_retire;
51107 +atomic_unchecked_t fscache_n_relinquishes;
51108 +atomic_unchecked_t fscache_n_relinquishes_null;
51109 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51110 +atomic_unchecked_t fscache_n_relinquishes_retire;
51111
51112 -atomic_t fscache_n_cookie_index;
51113 -atomic_t fscache_n_cookie_data;
51114 -atomic_t fscache_n_cookie_special;
51115 +atomic_unchecked_t fscache_n_cookie_index;
51116 +atomic_unchecked_t fscache_n_cookie_data;
51117 +atomic_unchecked_t fscache_n_cookie_special;
51118
51119 -atomic_t fscache_n_object_alloc;
51120 -atomic_t fscache_n_object_no_alloc;
51121 -atomic_t fscache_n_object_lookups;
51122 -atomic_t fscache_n_object_lookups_negative;
51123 -atomic_t fscache_n_object_lookups_positive;
51124 -atomic_t fscache_n_object_lookups_timed_out;
51125 -atomic_t fscache_n_object_created;
51126 -atomic_t fscache_n_object_avail;
51127 -atomic_t fscache_n_object_dead;
51128 +atomic_unchecked_t fscache_n_object_alloc;
51129 +atomic_unchecked_t fscache_n_object_no_alloc;
51130 +atomic_unchecked_t fscache_n_object_lookups;
51131 +atomic_unchecked_t fscache_n_object_lookups_negative;
51132 +atomic_unchecked_t fscache_n_object_lookups_positive;
51133 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
51134 +atomic_unchecked_t fscache_n_object_created;
51135 +atomic_unchecked_t fscache_n_object_avail;
51136 +atomic_unchecked_t fscache_n_object_dead;
51137
51138 -atomic_t fscache_n_checkaux_none;
51139 -atomic_t fscache_n_checkaux_okay;
51140 -atomic_t fscache_n_checkaux_update;
51141 -atomic_t fscache_n_checkaux_obsolete;
51142 +atomic_unchecked_t fscache_n_checkaux_none;
51143 +atomic_unchecked_t fscache_n_checkaux_okay;
51144 +atomic_unchecked_t fscache_n_checkaux_update;
51145 +atomic_unchecked_t fscache_n_checkaux_obsolete;
51146
51147 atomic_t fscache_n_cop_alloc_object;
51148 atomic_t fscache_n_cop_lookup_object;
51149 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
51150 seq_puts(m, "FS-Cache statistics\n");
51151
51152 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
51153 - atomic_read(&fscache_n_cookie_index),
51154 - atomic_read(&fscache_n_cookie_data),
51155 - atomic_read(&fscache_n_cookie_special));
51156 + atomic_read_unchecked(&fscache_n_cookie_index),
51157 + atomic_read_unchecked(&fscache_n_cookie_data),
51158 + atomic_read_unchecked(&fscache_n_cookie_special));
51159
51160 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
51161 - atomic_read(&fscache_n_object_alloc),
51162 - atomic_read(&fscache_n_object_no_alloc),
51163 - atomic_read(&fscache_n_object_avail),
51164 - atomic_read(&fscache_n_object_dead));
51165 + atomic_read_unchecked(&fscache_n_object_alloc),
51166 + atomic_read_unchecked(&fscache_n_object_no_alloc),
51167 + atomic_read_unchecked(&fscache_n_object_avail),
51168 + atomic_read_unchecked(&fscache_n_object_dead));
51169 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
51170 - atomic_read(&fscache_n_checkaux_none),
51171 - atomic_read(&fscache_n_checkaux_okay),
51172 - atomic_read(&fscache_n_checkaux_update),
51173 - atomic_read(&fscache_n_checkaux_obsolete));
51174 + atomic_read_unchecked(&fscache_n_checkaux_none),
51175 + atomic_read_unchecked(&fscache_n_checkaux_okay),
51176 + atomic_read_unchecked(&fscache_n_checkaux_update),
51177 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
51178
51179 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51180 - atomic_read(&fscache_n_marks),
51181 - atomic_read(&fscache_n_uncaches));
51182 + atomic_read_unchecked(&fscache_n_marks),
51183 + atomic_read_unchecked(&fscache_n_uncaches));
51184
51185 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51186 " oom=%u\n",
51187 - atomic_read(&fscache_n_acquires),
51188 - atomic_read(&fscache_n_acquires_null),
51189 - atomic_read(&fscache_n_acquires_no_cache),
51190 - atomic_read(&fscache_n_acquires_ok),
51191 - atomic_read(&fscache_n_acquires_nobufs),
51192 - atomic_read(&fscache_n_acquires_oom));
51193 + atomic_read_unchecked(&fscache_n_acquires),
51194 + atomic_read_unchecked(&fscache_n_acquires_null),
51195 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
51196 + atomic_read_unchecked(&fscache_n_acquires_ok),
51197 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
51198 + atomic_read_unchecked(&fscache_n_acquires_oom));
51199
51200 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51201 - atomic_read(&fscache_n_object_lookups),
51202 - atomic_read(&fscache_n_object_lookups_negative),
51203 - atomic_read(&fscache_n_object_lookups_positive),
51204 - atomic_read(&fscache_n_object_lookups_timed_out),
51205 - atomic_read(&fscache_n_object_created));
51206 + atomic_read_unchecked(&fscache_n_object_lookups),
51207 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
51208 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
51209 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
51210 + atomic_read_unchecked(&fscache_n_object_created));
51211
51212 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51213 - atomic_read(&fscache_n_updates),
51214 - atomic_read(&fscache_n_updates_null),
51215 - atomic_read(&fscache_n_updates_run));
51216 + atomic_read_unchecked(&fscache_n_updates),
51217 + atomic_read_unchecked(&fscache_n_updates_null),
51218 + atomic_read_unchecked(&fscache_n_updates_run));
51219
51220 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51221 - atomic_read(&fscache_n_relinquishes),
51222 - atomic_read(&fscache_n_relinquishes_null),
51223 - atomic_read(&fscache_n_relinquishes_waitcrt),
51224 - atomic_read(&fscache_n_relinquishes_retire));
51225 + atomic_read_unchecked(&fscache_n_relinquishes),
51226 + atomic_read_unchecked(&fscache_n_relinquishes_null),
51227 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51228 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
51229
51230 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51231 - atomic_read(&fscache_n_attr_changed),
51232 - atomic_read(&fscache_n_attr_changed_ok),
51233 - atomic_read(&fscache_n_attr_changed_nobufs),
51234 - atomic_read(&fscache_n_attr_changed_nomem),
51235 - atomic_read(&fscache_n_attr_changed_calls));
51236 + atomic_read_unchecked(&fscache_n_attr_changed),
51237 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
51238 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51239 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51240 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
51241
51242 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51243 - atomic_read(&fscache_n_allocs),
51244 - atomic_read(&fscache_n_allocs_ok),
51245 - atomic_read(&fscache_n_allocs_wait),
51246 - atomic_read(&fscache_n_allocs_nobufs),
51247 - atomic_read(&fscache_n_allocs_intr));
51248 + atomic_read_unchecked(&fscache_n_allocs),
51249 + atomic_read_unchecked(&fscache_n_allocs_ok),
51250 + atomic_read_unchecked(&fscache_n_allocs_wait),
51251 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
51252 + atomic_read_unchecked(&fscache_n_allocs_intr));
51253 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51254 - atomic_read(&fscache_n_alloc_ops),
51255 - atomic_read(&fscache_n_alloc_op_waits),
51256 - atomic_read(&fscache_n_allocs_object_dead));
51257 + atomic_read_unchecked(&fscache_n_alloc_ops),
51258 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
51259 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
51260
51261 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51262 " int=%u oom=%u\n",
51263 - atomic_read(&fscache_n_retrievals),
51264 - atomic_read(&fscache_n_retrievals_ok),
51265 - atomic_read(&fscache_n_retrievals_wait),
51266 - atomic_read(&fscache_n_retrievals_nodata),
51267 - atomic_read(&fscache_n_retrievals_nobufs),
51268 - atomic_read(&fscache_n_retrievals_intr),
51269 - atomic_read(&fscache_n_retrievals_nomem));
51270 + atomic_read_unchecked(&fscache_n_retrievals),
51271 + atomic_read_unchecked(&fscache_n_retrievals_ok),
51272 + atomic_read_unchecked(&fscache_n_retrievals_wait),
51273 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
51274 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51275 + atomic_read_unchecked(&fscache_n_retrievals_intr),
51276 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
51277 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51278 - atomic_read(&fscache_n_retrieval_ops),
51279 - atomic_read(&fscache_n_retrieval_op_waits),
51280 - atomic_read(&fscache_n_retrievals_object_dead));
51281 + atomic_read_unchecked(&fscache_n_retrieval_ops),
51282 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51283 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51284
51285 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51286 - atomic_read(&fscache_n_stores),
51287 - atomic_read(&fscache_n_stores_ok),
51288 - atomic_read(&fscache_n_stores_again),
51289 - atomic_read(&fscache_n_stores_nobufs),
51290 - atomic_read(&fscache_n_stores_oom));
51291 + atomic_read_unchecked(&fscache_n_stores),
51292 + atomic_read_unchecked(&fscache_n_stores_ok),
51293 + atomic_read_unchecked(&fscache_n_stores_again),
51294 + atomic_read_unchecked(&fscache_n_stores_nobufs),
51295 + atomic_read_unchecked(&fscache_n_stores_oom));
51296 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51297 - atomic_read(&fscache_n_store_ops),
51298 - atomic_read(&fscache_n_store_calls),
51299 - atomic_read(&fscache_n_store_pages),
51300 - atomic_read(&fscache_n_store_radix_deletes),
51301 - atomic_read(&fscache_n_store_pages_over_limit));
51302 + atomic_read_unchecked(&fscache_n_store_ops),
51303 + atomic_read_unchecked(&fscache_n_store_calls),
51304 + atomic_read_unchecked(&fscache_n_store_pages),
51305 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
51306 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51307
51308 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51309 - atomic_read(&fscache_n_store_vmscan_not_storing),
51310 - atomic_read(&fscache_n_store_vmscan_gone),
51311 - atomic_read(&fscache_n_store_vmscan_busy),
51312 - atomic_read(&fscache_n_store_vmscan_cancelled));
51313 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51314 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51315 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51316 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51317
51318 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51319 - atomic_read(&fscache_n_op_pend),
51320 - atomic_read(&fscache_n_op_run),
51321 - atomic_read(&fscache_n_op_enqueue),
51322 - atomic_read(&fscache_n_op_cancelled),
51323 - atomic_read(&fscache_n_op_rejected));
51324 + atomic_read_unchecked(&fscache_n_op_pend),
51325 + atomic_read_unchecked(&fscache_n_op_run),
51326 + atomic_read_unchecked(&fscache_n_op_enqueue),
51327 + atomic_read_unchecked(&fscache_n_op_cancelled),
51328 + atomic_read_unchecked(&fscache_n_op_rejected));
51329 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51330 - atomic_read(&fscache_n_op_deferred_release),
51331 - atomic_read(&fscache_n_op_release),
51332 - atomic_read(&fscache_n_op_gc));
51333 + atomic_read_unchecked(&fscache_n_op_deferred_release),
51334 + atomic_read_unchecked(&fscache_n_op_release),
51335 + atomic_read_unchecked(&fscache_n_op_gc));
51336
51337 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51338 atomic_read(&fscache_n_cop_alloc_object),
51339 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51340 index de792dc..448b532 100644
51341 --- a/fs/fuse/cuse.c
51342 +++ b/fs/fuse/cuse.c
51343 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
51344 INIT_LIST_HEAD(&cuse_conntbl[i]);
51345
51346 /* inherit and extend fuse_dev_operations */
51347 - cuse_channel_fops = fuse_dev_operations;
51348 - cuse_channel_fops.owner = THIS_MODULE;
51349 - cuse_channel_fops.open = cuse_channel_open;
51350 - cuse_channel_fops.release = cuse_channel_release;
51351 + pax_open_kernel();
51352 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51353 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51354 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
51355 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
51356 + pax_close_kernel();
51357
51358 cuse_class = class_create(THIS_MODULE, "cuse");
51359 if (IS_ERR(cuse_class))
51360 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51361 index 1facb39..7f48557 100644
51362 --- a/fs/fuse/dev.c
51363 +++ b/fs/fuse/dev.c
51364 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51365 {
51366 struct fuse_notify_inval_entry_out outarg;
51367 int err = -EINVAL;
51368 - char buf[FUSE_NAME_MAX+1];
51369 + char *buf = NULL;
51370 struct qstr name;
51371
51372 if (size < sizeof(outarg))
51373 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51374 if (outarg.namelen > FUSE_NAME_MAX)
51375 goto err;
51376
51377 + err = -ENOMEM;
51378 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51379 + if (!buf)
51380 + goto err;
51381 +
51382 err = -EINVAL;
51383 if (size != sizeof(outarg) + outarg.namelen + 1)
51384 goto err;
51385 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51386
51387 down_read(&fc->killsb);
51388 err = -ENOENT;
51389 - if (!fc->sb)
51390 - goto err_unlock;
51391 -
51392 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51393 -
51394 -err_unlock:
51395 + if (fc->sb)
51396 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51397 up_read(&fc->killsb);
51398 + kfree(buf);
51399 return err;
51400
51401 err:
51402 fuse_copy_finish(cs);
51403 + kfree(buf);
51404 return err;
51405 }
51406
51407 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51408 index 4787ae6..73efff7 100644
51409 --- a/fs/fuse/dir.c
51410 +++ b/fs/fuse/dir.c
51411 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51412 return link;
51413 }
51414
51415 -static void free_link(char *link)
51416 +static void free_link(const char *link)
51417 {
51418 if (!IS_ERR(link))
51419 free_page((unsigned long) link);
51420 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51421 index 247436c..e650ccb 100644
51422 --- a/fs/gfs2/ops_inode.c
51423 +++ b/fs/gfs2/ops_inode.c
51424 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51425 unsigned int x;
51426 int error;
51427
51428 + pax_track_stack();
51429 +
51430 if (ndentry->d_inode) {
51431 nip = GFS2_I(ndentry->d_inode);
51432 if (ip == nip)
51433 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51434 index 4463297..4fed53b 100644
51435 --- a/fs/gfs2/sys.c
51436 +++ b/fs/gfs2/sys.c
51437 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51438 return a->store ? a->store(sdp, buf, len) : len;
51439 }
51440
51441 -static struct sysfs_ops gfs2_attr_ops = {
51442 +static const struct sysfs_ops gfs2_attr_ops = {
51443 .show = gfs2_attr_show,
51444 .store = gfs2_attr_store,
51445 };
51446 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51447 return 0;
51448 }
51449
51450 -static struct kset_uevent_ops gfs2_uevent_ops = {
51451 +static const struct kset_uevent_ops gfs2_uevent_ops = {
51452 .uevent = gfs2_uevent,
51453 };
51454
51455 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51456 index f6874ac..7cd98a8 100644
51457 --- a/fs/hfsplus/catalog.c
51458 +++ b/fs/hfsplus/catalog.c
51459 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51460 int err;
51461 u16 type;
51462
51463 + pax_track_stack();
51464 +
51465 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51466 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51467 if (err)
51468 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51469 int entry_size;
51470 int err;
51471
51472 + pax_track_stack();
51473 +
51474 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51475 sb = dir->i_sb;
51476 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51477 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51478 int entry_size, type;
51479 int err = 0;
51480
51481 + pax_track_stack();
51482 +
51483 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51484 dst_dir->i_ino, dst_name->name);
51485 sb = src_dir->i_sb;
51486 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51487 index 5f40236..dac3421 100644
51488 --- a/fs/hfsplus/dir.c
51489 +++ b/fs/hfsplus/dir.c
51490 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51491 struct hfsplus_readdir_data *rd;
51492 u16 type;
51493
51494 + pax_track_stack();
51495 +
51496 if (filp->f_pos >= inode->i_size)
51497 return 0;
51498
51499 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51500 index 1bcf597..905a251 100644
51501 --- a/fs/hfsplus/inode.c
51502 +++ b/fs/hfsplus/inode.c
51503 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51504 int res = 0;
51505 u16 type;
51506
51507 + pax_track_stack();
51508 +
51509 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51510
51511 HFSPLUS_I(inode).dev = 0;
51512 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51513 struct hfs_find_data fd;
51514 hfsplus_cat_entry entry;
51515
51516 + pax_track_stack();
51517 +
51518 if (HFSPLUS_IS_RSRC(inode))
51519 main_inode = HFSPLUS_I(inode).rsrc_inode;
51520
51521 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51522 index f457d2c..7ef4ad5 100644
51523 --- a/fs/hfsplus/ioctl.c
51524 +++ b/fs/hfsplus/ioctl.c
51525 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51526 struct hfsplus_cat_file *file;
51527 int res;
51528
51529 + pax_track_stack();
51530 +
51531 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51532 return -EOPNOTSUPP;
51533
51534 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51535 struct hfsplus_cat_file *file;
51536 ssize_t res = 0;
51537
51538 + pax_track_stack();
51539 +
51540 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51541 return -EOPNOTSUPP;
51542
51543 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51544 index 43022f3..7298079 100644
51545 --- a/fs/hfsplus/super.c
51546 +++ b/fs/hfsplus/super.c
51547 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51548 struct nls_table *nls = NULL;
51549 int err = -EINVAL;
51550
51551 + pax_track_stack();
51552 +
51553 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51554 if (!sbi)
51555 return -ENOMEM;
51556 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51557 index 87a1258..5694d91 100644
51558 --- a/fs/hugetlbfs/inode.c
51559 +++ b/fs/hugetlbfs/inode.c
51560 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51561 .kill_sb = kill_litter_super,
51562 };
51563
51564 -static struct vfsmount *hugetlbfs_vfsmount;
51565 +struct vfsmount *hugetlbfs_vfsmount;
51566
51567 static int can_do_hugetlb_shm(void)
51568 {
51569 diff --git a/fs/ioctl.c b/fs/ioctl.c
51570 index 6c75110..19d2c3c 100644
51571 --- a/fs/ioctl.c
51572 +++ b/fs/ioctl.c
51573 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51574 u64 phys, u64 len, u32 flags)
51575 {
51576 struct fiemap_extent extent;
51577 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
51578 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51579
51580 /* only count the extents */
51581 if (fieinfo->fi_extents_max == 0) {
51582 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51583
51584 fieinfo.fi_flags = fiemap.fm_flags;
51585 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51586 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51587 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51588
51589 if (fiemap.fm_extent_count != 0 &&
51590 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51591 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51592 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51593 fiemap.fm_flags = fieinfo.fi_flags;
51594 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51595 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51596 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51597 error = -EFAULT;
51598
51599 return error;
51600 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51601 index b0435dd..81ee0be 100644
51602 --- a/fs/jbd/checkpoint.c
51603 +++ b/fs/jbd/checkpoint.c
51604 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51605 tid_t this_tid;
51606 int result;
51607
51608 + pax_track_stack();
51609 +
51610 jbd_debug(1, "Start checkpoint\n");
51611
51612 /*
51613 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51614 index 546d153..736896c 100644
51615 --- a/fs/jffs2/compr_rtime.c
51616 +++ b/fs/jffs2/compr_rtime.c
51617 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51618 int outpos = 0;
51619 int pos=0;
51620
51621 + pax_track_stack();
51622 +
51623 memset(positions,0,sizeof(positions));
51624
51625 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51626 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51627 int outpos = 0;
51628 int pos=0;
51629
51630 + pax_track_stack();
51631 +
51632 memset(positions,0,sizeof(positions));
51633
51634 while (outpos<destlen) {
51635 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51636 index 170d289..3254b98 100644
51637 --- a/fs/jffs2/compr_rubin.c
51638 +++ b/fs/jffs2/compr_rubin.c
51639 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51640 int ret;
51641 uint32_t mysrclen, mydstlen;
51642
51643 + pax_track_stack();
51644 +
51645 mysrclen = *sourcelen;
51646 mydstlen = *dstlen - 8;
51647
51648 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51649 index b47679b..00d65d3 100644
51650 --- a/fs/jffs2/erase.c
51651 +++ b/fs/jffs2/erase.c
51652 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51653 struct jffs2_unknown_node marker = {
51654 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51655 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51656 - .totlen = cpu_to_je32(c->cleanmarker_size)
51657 + .totlen = cpu_to_je32(c->cleanmarker_size),
51658 + .hdr_crc = cpu_to_je32(0)
51659 };
51660
51661 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51662 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51663 index 5ef7bac..4fd1e3c 100644
51664 --- a/fs/jffs2/wbuf.c
51665 +++ b/fs/jffs2/wbuf.c
51666 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51667 {
51668 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51669 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51670 - .totlen = constant_cpu_to_je32(8)
51671 + .totlen = constant_cpu_to_je32(8),
51672 + .hdr_crc = constant_cpu_to_je32(0)
51673 };
51674
51675 /*
51676 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51677 index 082e844..52012a1 100644
51678 --- a/fs/jffs2/xattr.c
51679 +++ b/fs/jffs2/xattr.c
51680 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51681
51682 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51683
51684 + pax_track_stack();
51685 +
51686 /* Phase.1 : Merge same xref */
51687 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51688 xref_tmphash[i] = NULL;
51689 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51690 index 2234c73..f6e6e6b 100644
51691 --- a/fs/jfs/super.c
51692 +++ b/fs/jfs/super.c
51693 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51694
51695 jfs_inode_cachep =
51696 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51697 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51698 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51699 init_once);
51700 if (jfs_inode_cachep == NULL)
51701 return -ENOMEM;
51702 diff --git a/fs/libfs.c b/fs/libfs.c
51703 index ba36e93..3153fce 100644
51704 --- a/fs/libfs.c
51705 +++ b/fs/libfs.c
51706 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51707
51708 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51709 struct dentry *next;
51710 + char d_name[sizeof(next->d_iname)];
51711 + const unsigned char *name;
51712 +
51713 next = list_entry(p, struct dentry, d_u.d_child);
51714 if (d_unhashed(next) || !next->d_inode)
51715 continue;
51716
51717 spin_unlock(&dcache_lock);
51718 - if (filldir(dirent, next->d_name.name,
51719 + name = next->d_name.name;
51720 + if (name == next->d_iname) {
51721 + memcpy(d_name, name, next->d_name.len);
51722 + name = d_name;
51723 + }
51724 + if (filldir(dirent, name,
51725 next->d_name.len, filp->f_pos,
51726 next->d_inode->i_ino,
51727 dt_type(next->d_inode)) < 0)
51728 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51729 index c325a83..d15b07b 100644
51730 --- a/fs/lockd/clntproc.c
51731 +++ b/fs/lockd/clntproc.c
51732 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51733 /*
51734 * Cookie counter for NLM requests
51735 */
51736 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51737 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51738
51739 void nlmclnt_next_cookie(struct nlm_cookie *c)
51740 {
51741 - u32 cookie = atomic_inc_return(&nlm_cookie);
51742 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51743
51744 memcpy(c->data, &cookie, 4);
51745 c->len=4;
51746 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51747 struct nlm_rqst reqst, *req;
51748 int status;
51749
51750 + pax_track_stack();
51751 +
51752 req = &reqst;
51753 memset(req, 0, sizeof(*req));
51754 locks_init_lock(&req->a_args.lock.fl);
51755 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51756 index 1a54ae1..6a16c27 100644
51757 --- a/fs/lockd/svc.c
51758 +++ b/fs/lockd/svc.c
51759 @@ -43,7 +43,7 @@
51760
51761 static struct svc_program nlmsvc_program;
51762
51763 -struct nlmsvc_binding * nlmsvc_ops;
51764 +const struct nlmsvc_binding * nlmsvc_ops;
51765 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51766
51767 static DEFINE_MUTEX(nlmsvc_mutex);
51768 diff --git a/fs/locks.c b/fs/locks.c
51769 index a8794f2..4041e55 100644
51770 --- a/fs/locks.c
51771 +++ b/fs/locks.c
51772 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51773
51774 static struct kmem_cache *filelock_cache __read_mostly;
51775
51776 +static void locks_init_lock_always(struct file_lock *fl)
51777 +{
51778 + fl->fl_next = NULL;
51779 + fl->fl_fasync = NULL;
51780 + fl->fl_owner = NULL;
51781 + fl->fl_pid = 0;
51782 + fl->fl_nspid = NULL;
51783 + fl->fl_file = NULL;
51784 + fl->fl_flags = 0;
51785 + fl->fl_type = 0;
51786 + fl->fl_start = fl->fl_end = 0;
51787 +}
51788 +
51789 /* Allocate an empty lock structure. */
51790 static struct file_lock *locks_alloc_lock(void)
51791 {
51792 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51793 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51794 +
51795 + if (fl)
51796 + locks_init_lock_always(fl);
51797 +
51798 + return fl;
51799 }
51800
51801 void locks_release_private(struct file_lock *fl)
51802 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51803 INIT_LIST_HEAD(&fl->fl_link);
51804 INIT_LIST_HEAD(&fl->fl_block);
51805 init_waitqueue_head(&fl->fl_wait);
51806 - fl->fl_next = NULL;
51807 - fl->fl_fasync = NULL;
51808 - fl->fl_owner = NULL;
51809 - fl->fl_pid = 0;
51810 - fl->fl_nspid = NULL;
51811 - fl->fl_file = NULL;
51812 - fl->fl_flags = 0;
51813 - fl->fl_type = 0;
51814 - fl->fl_start = fl->fl_end = 0;
51815 fl->fl_ops = NULL;
51816 fl->fl_lmops = NULL;
51817 + locks_init_lock_always(fl);
51818 }
51819
51820 EXPORT_SYMBOL(locks_init_lock);
51821 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51822 return;
51823
51824 if (filp->f_op && filp->f_op->flock) {
51825 - struct file_lock fl = {
51826 + struct file_lock flock = {
51827 .fl_pid = current->tgid,
51828 .fl_file = filp,
51829 .fl_flags = FL_FLOCK,
51830 .fl_type = F_UNLCK,
51831 .fl_end = OFFSET_MAX,
51832 };
51833 - filp->f_op->flock(filp, F_SETLKW, &fl);
51834 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51835 - fl.fl_ops->fl_release_private(&fl);
51836 + filp->f_op->flock(filp, F_SETLKW, &flock);
51837 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51838 + flock.fl_ops->fl_release_private(&flock);
51839 }
51840
51841 lock_kernel();
51842 diff --git a/fs/mbcache.c b/fs/mbcache.c
51843 index ec88ff3..b843a82 100644
51844 --- a/fs/mbcache.c
51845 +++ b/fs/mbcache.c
51846 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51847 if (!cache)
51848 goto fail;
51849 cache->c_name = name;
51850 - cache->c_op.free = NULL;
51851 + *(void **)&cache->c_op.free = NULL;
51852 if (cache_op)
51853 - cache->c_op.free = cache_op->free;
51854 + *(void **)&cache->c_op.free = cache_op->free;
51855 atomic_set(&cache->c_entry_count, 0);
51856 cache->c_bucket_bits = bucket_bits;
51857 #ifdef MB_CACHE_INDEXES_COUNT
51858 diff --git a/fs/namei.c b/fs/namei.c
51859 index b0afbd4..8d065a1 100644
51860 --- a/fs/namei.c
51861 +++ b/fs/namei.c
51862 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51863 return ret;
51864
51865 /*
51866 + * Searching includes executable on directories, else just read.
51867 + */
51868 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51869 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51870 + if (capable(CAP_DAC_READ_SEARCH))
51871 + return 0;
51872 +
51873 + /*
51874 * Read/write DACs are always overridable.
51875 * Executable DACs are overridable if at least one exec bit is set.
51876 */
51877 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51878 if (capable(CAP_DAC_OVERRIDE))
51879 return 0;
51880
51881 - /*
51882 - * Searching includes executable on directories, else just read.
51883 - */
51884 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51885 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51886 - if (capable(CAP_DAC_READ_SEARCH))
51887 - return 0;
51888 -
51889 return -EACCES;
51890 }
51891
51892 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51893 if (!ret)
51894 goto ok;
51895
51896 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51897 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51898 + capable(CAP_DAC_OVERRIDE))
51899 goto ok;
51900
51901 return ret;
51902 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51903 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51904 error = PTR_ERR(cookie);
51905 if (!IS_ERR(cookie)) {
51906 - char *s = nd_get_link(nd);
51907 + const char *s = nd_get_link(nd);
51908 error = 0;
51909 if (s)
51910 error = __vfs_follow_link(nd, s);
51911 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51912 err = security_inode_follow_link(path->dentry, nd);
51913 if (err)
51914 goto loop;
51915 +
51916 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51917 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51918 + err = -EACCES;
51919 + goto loop;
51920 + }
51921 +
51922 current->link_count++;
51923 current->total_link_count++;
51924 nd->depth++;
51925 @@ -1016,11 +1024,19 @@ return_reval:
51926 break;
51927 }
51928 return_base:
51929 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51930 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51931 + path_put(&nd->path);
51932 + return -ENOENT;
51933 + }
51934 return 0;
51935 out_dput:
51936 path_put_conditional(&next, nd);
51937 break;
51938 }
51939 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51940 + err = -ENOENT;
51941 +
51942 path_put(&nd->path);
51943 return_err:
51944 return err;
51945 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51946 int retval = path_init(dfd, name, flags, nd);
51947 if (!retval)
51948 retval = path_walk(name, nd);
51949 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51950 - nd->path.dentry->d_inode))
51951 - audit_inode(name, nd->path.dentry);
51952 +
51953 + if (likely(!retval)) {
51954 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51955 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51956 + retval = -ENOENT;
51957 + if (!audit_dummy_context())
51958 + audit_inode(name, nd->path.dentry);
51959 + }
51960 + }
51961 if (nd->root.mnt) {
51962 path_put(&nd->root);
51963 nd->root.mnt = NULL;
51964 }
51965 +
51966 return retval;
51967 }
51968
51969 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51970 if (error)
51971 goto err_out;
51972
51973 +
51974 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51975 + error = -EPERM;
51976 + goto err_out;
51977 + }
51978 + if (gr_handle_rawio(inode)) {
51979 + error = -EPERM;
51980 + goto err_out;
51981 + }
51982 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51983 + error = -EACCES;
51984 + goto err_out;
51985 + }
51986 +
51987 if (flag & O_TRUNC) {
51988 error = get_write_access(inode);
51989 if (error)
51990 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51991 {
51992 int error;
51993 struct dentry *dir = nd->path.dentry;
51994 + int acc_mode = ACC_MODE(flag);
51995 +
51996 + if (flag & O_TRUNC)
51997 + acc_mode |= MAY_WRITE;
51998 + if (flag & O_APPEND)
51999 + acc_mode |= MAY_APPEND;
52000 +
52001 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
52002 + error = -EACCES;
52003 + goto out_unlock;
52004 + }
52005
52006 if (!IS_POSIXACL(dir->d_inode))
52007 mode &= ~current_umask();
52008 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
52009 if (error)
52010 goto out_unlock;
52011 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
52012 + if (!error)
52013 + gr_handle_create(path->dentry, nd->path.mnt);
52014 out_unlock:
52015 mutex_unlock(&dir->d_inode->i_mutex);
52016 dput(nd->path.dentry);
52017 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
52018 &nd, flag);
52019 if (error)
52020 return ERR_PTR(error);
52021 +
52022 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
52023 + error = -EPERM;
52024 + goto exit;
52025 + }
52026 +
52027 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
52028 + error = -EPERM;
52029 + goto exit;
52030 + }
52031 +
52032 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
52033 + error = -EACCES;
52034 + goto exit;
52035 + }
52036 +
52037 goto ok;
52038 }
52039
52040 @@ -1795,6 +1861,19 @@ do_last:
52041 /*
52042 * It already exists.
52043 */
52044 +
52045 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
52046 + error = -ENOENT;
52047 + goto exit_mutex_unlock;
52048 + }
52049 +
52050 + /* only check if O_CREAT is specified, all other checks need
52051 + to go into may_open */
52052 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
52053 + error = -EACCES;
52054 + goto exit_mutex_unlock;
52055 + }
52056 +
52057 mutex_unlock(&dir->d_inode->i_mutex);
52058 audit_inode(pathname, path.dentry);
52059
52060 @@ -1887,6 +1966,13 @@ do_link:
52061 error = security_inode_follow_link(path.dentry, &nd);
52062 if (error)
52063 goto exit_dput;
52064 +
52065 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
52066 + path.dentry, nd.path.mnt)) {
52067 + error = -EACCES;
52068 + goto exit_dput;
52069 + }
52070 +
52071 error = __do_follow_link(&path, &nd);
52072 if (error) {
52073 /* Does someone understand code flow here? Or it is only
52074 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
52075 }
52076 return dentry;
52077 eexist:
52078 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
52079 + dput(dentry);
52080 + return ERR_PTR(-ENOENT);
52081 + }
52082 dput(dentry);
52083 dentry = ERR_PTR(-EEXIST);
52084 fail:
52085 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52086 error = may_mknod(mode);
52087 if (error)
52088 goto out_dput;
52089 +
52090 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
52091 + error = -EPERM;
52092 + goto out_dput;
52093 + }
52094 +
52095 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
52096 + error = -EACCES;
52097 + goto out_dput;
52098 + }
52099 +
52100 error = mnt_want_write(nd.path.mnt);
52101 if (error)
52102 goto out_dput;
52103 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52104 }
52105 out_drop_write:
52106 mnt_drop_write(nd.path.mnt);
52107 +
52108 + if (!error)
52109 + gr_handle_create(dentry, nd.path.mnt);
52110 out_dput:
52111 dput(dentry);
52112 out_unlock:
52113 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52114 if (IS_ERR(dentry))
52115 goto out_unlock;
52116
52117 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
52118 + error = -EACCES;
52119 + goto out_dput;
52120 + }
52121 +
52122 if (!IS_POSIXACL(nd.path.dentry->d_inode))
52123 mode &= ~current_umask();
52124 error = mnt_want_write(nd.path.mnt);
52125 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52126 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
52127 out_drop_write:
52128 mnt_drop_write(nd.path.mnt);
52129 +
52130 + if (!error)
52131 + gr_handle_create(dentry, nd.path.mnt);
52132 +
52133 out_dput:
52134 dput(dentry);
52135 out_unlock:
52136 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52137 char * name;
52138 struct dentry *dentry;
52139 struct nameidata nd;
52140 + ino_t saved_ino = 0;
52141 + dev_t saved_dev = 0;
52142
52143 error = user_path_parent(dfd, pathname, &nd, &name);
52144 if (error)
52145 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
52146 error = PTR_ERR(dentry);
52147 if (IS_ERR(dentry))
52148 goto exit2;
52149 +
52150 + if (dentry->d_inode != NULL) {
52151 + saved_ino = dentry->d_inode->i_ino;
52152 + saved_dev = gr_get_dev_from_dentry(dentry);
52153 +
52154 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
52155 + error = -EACCES;
52156 + goto exit3;
52157 + }
52158 + }
52159 +
52160 error = mnt_want_write(nd.path.mnt);
52161 if (error)
52162 goto exit3;
52163 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52164 if (error)
52165 goto exit4;
52166 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
52167 + if (!error && (saved_dev || saved_ino))
52168 + gr_handle_delete(saved_ino, saved_dev);
52169 exit4:
52170 mnt_drop_write(nd.path.mnt);
52171 exit3:
52172 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52173 struct dentry *dentry;
52174 struct nameidata nd;
52175 struct inode *inode = NULL;
52176 + ino_t saved_ino = 0;
52177 + dev_t saved_dev = 0;
52178
52179 error = user_path_parent(dfd, pathname, &nd, &name);
52180 if (error)
52181 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52182 if (nd.last.name[nd.last.len])
52183 goto slashes;
52184 inode = dentry->d_inode;
52185 - if (inode)
52186 + if (inode) {
52187 + if (inode->i_nlink <= 1) {
52188 + saved_ino = inode->i_ino;
52189 + saved_dev = gr_get_dev_from_dentry(dentry);
52190 + }
52191 +
52192 atomic_inc(&inode->i_count);
52193 +
52194 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52195 + error = -EACCES;
52196 + goto exit2;
52197 + }
52198 + }
52199 error = mnt_want_write(nd.path.mnt);
52200 if (error)
52201 goto exit2;
52202 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52203 if (error)
52204 goto exit3;
52205 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52206 + if (!error && (saved_ino || saved_dev))
52207 + gr_handle_delete(saved_ino, saved_dev);
52208 exit3:
52209 mnt_drop_write(nd.path.mnt);
52210 exit2:
52211 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52212 if (IS_ERR(dentry))
52213 goto out_unlock;
52214
52215 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
52216 + error = -EACCES;
52217 + goto out_dput;
52218 + }
52219 +
52220 error = mnt_want_write(nd.path.mnt);
52221 if (error)
52222 goto out_dput;
52223 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52224 if (error)
52225 goto out_drop_write;
52226 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52227 + if (!error)
52228 + gr_handle_create(dentry, nd.path.mnt);
52229 out_drop_write:
52230 mnt_drop_write(nd.path.mnt);
52231 out_dput:
52232 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52233 error = PTR_ERR(new_dentry);
52234 if (IS_ERR(new_dentry))
52235 goto out_unlock;
52236 +
52237 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52238 + old_path.dentry->d_inode,
52239 + old_path.dentry->d_inode->i_mode, to)) {
52240 + error = -EACCES;
52241 + goto out_dput;
52242 + }
52243 +
52244 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52245 + old_path.dentry, old_path.mnt, to)) {
52246 + error = -EACCES;
52247 + goto out_dput;
52248 + }
52249 +
52250 error = mnt_want_write(nd.path.mnt);
52251 if (error)
52252 goto out_dput;
52253 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52254 if (error)
52255 goto out_drop_write;
52256 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52257 + if (!error)
52258 + gr_handle_create(new_dentry, nd.path.mnt);
52259 out_drop_write:
52260 mnt_drop_write(nd.path.mnt);
52261 out_dput:
52262 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52263 char *to;
52264 int error;
52265
52266 + pax_track_stack();
52267 +
52268 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52269 if (error)
52270 goto exit;
52271 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52272 if (new_dentry == trap)
52273 goto exit5;
52274
52275 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52276 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
52277 + to);
52278 + if (error)
52279 + goto exit5;
52280 +
52281 error = mnt_want_write(oldnd.path.mnt);
52282 if (error)
52283 goto exit5;
52284 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52285 goto exit6;
52286 error = vfs_rename(old_dir->d_inode, old_dentry,
52287 new_dir->d_inode, new_dentry);
52288 + if (!error)
52289 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52290 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52291 exit6:
52292 mnt_drop_write(oldnd.path.mnt);
52293 exit5:
52294 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52295
52296 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52297 {
52298 + char tmpbuf[64];
52299 + const char *newlink;
52300 int len;
52301
52302 len = PTR_ERR(link);
52303 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52304 len = strlen(link);
52305 if (len > (unsigned) buflen)
52306 len = buflen;
52307 - if (copy_to_user(buffer, link, len))
52308 +
52309 + if (len < sizeof(tmpbuf)) {
52310 + memcpy(tmpbuf, link, len);
52311 + newlink = tmpbuf;
52312 + } else
52313 + newlink = link;
52314 +
52315 + if (copy_to_user(buffer, newlink, len))
52316 len = -EFAULT;
52317 out:
52318 return len;
52319 diff --git a/fs/namespace.c b/fs/namespace.c
52320 index 2beb0fb..11a95a5 100644
52321 --- a/fs/namespace.c
52322 +++ b/fs/namespace.c
52323 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52324 if (!(sb->s_flags & MS_RDONLY))
52325 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52326 up_write(&sb->s_umount);
52327 +
52328 + gr_log_remount(mnt->mnt_devname, retval);
52329 +
52330 return retval;
52331 }
52332
52333 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52334 security_sb_umount_busy(mnt);
52335 up_write(&namespace_sem);
52336 release_mounts(&umount_list);
52337 +
52338 + gr_log_unmount(mnt->mnt_devname, retval);
52339 +
52340 return retval;
52341 }
52342
52343 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52344 if (retval)
52345 goto dput_out;
52346
52347 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52348 + retval = -EPERM;
52349 + goto dput_out;
52350 + }
52351 +
52352 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52353 + retval = -EPERM;
52354 + goto dput_out;
52355 + }
52356 +
52357 if (flags & MS_REMOUNT)
52358 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52359 data_page);
52360 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52361 dev_name, data_page);
52362 dput_out:
52363 path_put(&path);
52364 +
52365 + gr_log_mount(dev_name, dir_name, retval);
52366 +
52367 return retval;
52368 }
52369
52370 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52371 goto out1;
52372 }
52373
52374 + if (gr_handle_chroot_pivot()) {
52375 + error = -EPERM;
52376 + path_put(&old);
52377 + goto out1;
52378 + }
52379 +
52380 read_lock(&current->fs->lock);
52381 root = current->fs->root;
52382 path_get(&current->fs->root);
52383 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52384 index b8b5b30..2bd9ccb 100644
52385 --- a/fs/ncpfs/dir.c
52386 +++ b/fs/ncpfs/dir.c
52387 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52388 int res, val = 0, len;
52389 __u8 __name[NCP_MAXPATHLEN + 1];
52390
52391 + pax_track_stack();
52392 +
52393 parent = dget_parent(dentry);
52394 dir = parent->d_inode;
52395
52396 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52397 int error, res, len;
52398 __u8 __name[NCP_MAXPATHLEN + 1];
52399
52400 + pax_track_stack();
52401 +
52402 lock_kernel();
52403 error = -EIO;
52404 if (!ncp_conn_valid(server))
52405 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52406 int error, result, len;
52407 int opmode;
52408 __u8 __name[NCP_MAXPATHLEN + 1];
52409 -
52410 +
52411 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52412 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52413
52414 + pax_track_stack();
52415 +
52416 error = -EIO;
52417 lock_kernel();
52418 if (!ncp_conn_valid(server))
52419 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52420 int error, len;
52421 __u8 __name[NCP_MAXPATHLEN + 1];
52422
52423 + pax_track_stack();
52424 +
52425 DPRINTK("ncp_mkdir: making %s/%s\n",
52426 dentry->d_parent->d_name.name, dentry->d_name.name);
52427
52428 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52429 if (!ncp_conn_valid(server))
52430 goto out;
52431
52432 + pax_track_stack();
52433 +
52434 ncp_age_dentry(server, dentry);
52435 len = sizeof(__name);
52436 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52437 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52438 int old_len, new_len;
52439 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52440
52441 + pax_track_stack();
52442 +
52443 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52444 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52445 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52446 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52447 index cf98da1..da890a9 100644
52448 --- a/fs/ncpfs/inode.c
52449 +++ b/fs/ncpfs/inode.c
52450 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52451 #endif
52452 struct ncp_entry_info finfo;
52453
52454 + pax_track_stack();
52455 +
52456 data.wdog_pid = NULL;
52457 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52458 if (!server)
52459 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52460 index bfaef7b..e9d03ca 100644
52461 --- a/fs/nfs/inode.c
52462 +++ b/fs/nfs/inode.c
52463 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52464 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52465 nfsi->attrtimeo_timestamp = jiffies;
52466
52467 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52468 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52469 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52470 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52471 else
52472 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52473 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52474 }
52475
52476 -static atomic_long_t nfs_attr_generation_counter;
52477 +static atomic_long_unchecked_t nfs_attr_generation_counter;
52478
52479 static unsigned long nfs_read_attr_generation_counter(void)
52480 {
52481 - return atomic_long_read(&nfs_attr_generation_counter);
52482 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52483 }
52484
52485 unsigned long nfs_inc_attr_generation_counter(void)
52486 {
52487 - return atomic_long_inc_return(&nfs_attr_generation_counter);
52488 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52489 }
52490
52491 void nfs_fattr_init(struct nfs_fattr *fattr)
52492 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52493 index cc2f505..f6a236f 100644
52494 --- a/fs/nfsd/lockd.c
52495 +++ b/fs/nfsd/lockd.c
52496 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52497 fput(filp);
52498 }
52499
52500 -static struct nlmsvc_binding nfsd_nlm_ops = {
52501 +static const struct nlmsvc_binding nfsd_nlm_ops = {
52502 .fopen = nlm_fopen, /* open file for locking */
52503 .fclose = nlm_fclose, /* close file */
52504 };
52505 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52506 index cfc3391..dcc083a 100644
52507 --- a/fs/nfsd/nfs4state.c
52508 +++ b/fs/nfsd/nfs4state.c
52509 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52510 unsigned int cmd;
52511 int err;
52512
52513 + pax_track_stack();
52514 +
52515 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52516 (long long) lock->lk_offset,
52517 (long long) lock->lk_length);
52518 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52519 index 4a82a96..0d5fb49 100644
52520 --- a/fs/nfsd/nfs4xdr.c
52521 +++ b/fs/nfsd/nfs4xdr.c
52522 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52523 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52524 u32 minorversion = resp->cstate.minorversion;
52525
52526 + pax_track_stack();
52527 +
52528 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52529 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52530 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52531 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52532 index 2e09588..596421d 100644
52533 --- a/fs/nfsd/vfs.c
52534 +++ b/fs/nfsd/vfs.c
52535 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52536 } else {
52537 oldfs = get_fs();
52538 set_fs(KERNEL_DS);
52539 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52540 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52541 set_fs(oldfs);
52542 }
52543
52544 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52545
52546 /* Write the data. */
52547 oldfs = get_fs(); set_fs(KERNEL_DS);
52548 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52549 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52550 set_fs(oldfs);
52551 if (host_err < 0)
52552 goto out_nfserr;
52553 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52554 */
52555
52556 oldfs = get_fs(); set_fs(KERNEL_DS);
52557 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
52558 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52559 set_fs(oldfs);
52560
52561 if (host_err < 0)
52562 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52563 index f6af760..d0adf34 100644
52564 --- a/fs/nilfs2/ioctl.c
52565 +++ b/fs/nilfs2/ioctl.c
52566 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52567 unsigned int cmd, void __user *argp)
52568 {
52569 struct nilfs_argv argv[5];
52570 - const static size_t argsz[5] = {
52571 + static const size_t argsz[5] = {
52572 sizeof(struct nilfs_vdesc),
52573 sizeof(struct nilfs_period),
52574 sizeof(__u64),
52575 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52576 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52577 goto out_free;
52578
52579 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52580 + goto out_free;
52581 +
52582 len = argv[n].v_size * argv[n].v_nmembs;
52583 base = (void __user *)(unsigned long)argv[n].v_base;
52584 if (len == 0) {
52585 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52586 index 7e54e52..9337248 100644
52587 --- a/fs/notify/dnotify/dnotify.c
52588 +++ b/fs/notify/dnotify/dnotify.c
52589 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52590 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52591 }
52592
52593 -static struct fsnotify_ops dnotify_fsnotify_ops = {
52594 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
52595 .handle_event = dnotify_handle_event,
52596 .should_send_event = dnotify_should_send_event,
52597 .free_group_priv = NULL,
52598 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52599 index b8bf53b..c518688 100644
52600 --- a/fs/notify/notification.c
52601 +++ b/fs/notify/notification.c
52602 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52603 * get set to 0 so it will never get 'freed'
52604 */
52605 static struct fsnotify_event q_overflow_event;
52606 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52607 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52608
52609 /**
52610 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52611 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52612 */
52613 u32 fsnotify_get_cookie(void)
52614 {
52615 - return atomic_inc_return(&fsnotify_sync_cookie);
52616 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52617 }
52618 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52619
52620 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52621 index 5a9e344..0f8cd28 100644
52622 --- a/fs/ntfs/dir.c
52623 +++ b/fs/ntfs/dir.c
52624 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
52625 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52626 ~(s64)(ndir->itype.index.block_size - 1)));
52627 /* Bounds checks. */
52628 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52629 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52630 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52631 "inode 0x%lx or driver bug.", vdir->i_ino);
52632 goto err_out;
52633 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52634 index 663c0e3..b6868e9 100644
52635 --- a/fs/ntfs/file.c
52636 +++ b/fs/ntfs/file.c
52637 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52638 #endif /* NTFS_RW */
52639 };
52640
52641 -const struct file_operations ntfs_empty_file_ops = {};
52642 +const struct file_operations ntfs_empty_file_ops __read_only;
52643
52644 -const struct inode_operations ntfs_empty_inode_ops = {};
52645 +const struct inode_operations ntfs_empty_inode_ops __read_only;
52646 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52647 index 1cd2934..880b5d2 100644
52648 --- a/fs/ocfs2/cluster/masklog.c
52649 +++ b/fs/ocfs2/cluster/masklog.c
52650 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52651 return mlog_mask_store(mlog_attr->mask, buf, count);
52652 }
52653
52654 -static struct sysfs_ops mlog_attr_ops = {
52655 +static const struct sysfs_ops mlog_attr_ops = {
52656 .show = mlog_show,
52657 .store = mlog_store,
52658 };
52659 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52660 index ac10f83..2cd2607 100644
52661 --- a/fs/ocfs2/localalloc.c
52662 +++ b/fs/ocfs2/localalloc.c
52663 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52664 goto bail;
52665 }
52666
52667 - atomic_inc(&osb->alloc_stats.moves);
52668 + atomic_inc_unchecked(&osb->alloc_stats.moves);
52669
52670 status = 0;
52671 bail:
52672 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52673 index f010b22..9f9ed34 100644
52674 --- a/fs/ocfs2/namei.c
52675 +++ b/fs/ocfs2/namei.c
52676 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52677 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52678 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52679
52680 + pax_track_stack();
52681 +
52682 /* At some point it might be nice to break this function up a
52683 * bit. */
52684
52685 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52686 index d963d86..914cfbd 100644
52687 --- a/fs/ocfs2/ocfs2.h
52688 +++ b/fs/ocfs2/ocfs2.h
52689 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
52690
52691 struct ocfs2_alloc_stats
52692 {
52693 - atomic_t moves;
52694 - atomic_t local_data;
52695 - atomic_t bitmap_data;
52696 - atomic_t bg_allocs;
52697 - atomic_t bg_extends;
52698 + atomic_unchecked_t moves;
52699 + atomic_unchecked_t local_data;
52700 + atomic_unchecked_t bitmap_data;
52701 + atomic_unchecked_t bg_allocs;
52702 + atomic_unchecked_t bg_extends;
52703 };
52704
52705 enum ocfs2_local_alloc_state
52706 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52707 index 79b5dac..d322952 100644
52708 --- a/fs/ocfs2/suballoc.c
52709 +++ b/fs/ocfs2/suballoc.c
52710 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52711 mlog_errno(status);
52712 goto bail;
52713 }
52714 - atomic_inc(&osb->alloc_stats.bg_extends);
52715 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52716
52717 /* You should never ask for this much metadata */
52718 BUG_ON(bits_wanted >
52719 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52720 mlog_errno(status);
52721 goto bail;
52722 }
52723 - atomic_inc(&osb->alloc_stats.bg_allocs);
52724 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52725
52726 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52727 ac->ac_bits_given += (*num_bits);
52728 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52729 mlog_errno(status);
52730 goto bail;
52731 }
52732 - atomic_inc(&osb->alloc_stats.bg_allocs);
52733 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52734
52735 BUG_ON(num_bits != 1);
52736
52737 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52738 cluster_start,
52739 num_clusters);
52740 if (!status)
52741 - atomic_inc(&osb->alloc_stats.local_data);
52742 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
52743 } else {
52744 if (min_clusters > (osb->bitmap_cpg - 1)) {
52745 /* The only paths asking for contiguousness
52746 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52747 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52748 bg_blkno,
52749 bg_bit_off);
52750 - atomic_inc(&osb->alloc_stats.bitmap_data);
52751 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52752 }
52753 }
52754 if (status < 0) {
52755 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52756 index 9f55be4..a3f8048 100644
52757 --- a/fs/ocfs2/super.c
52758 +++ b/fs/ocfs2/super.c
52759 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52760 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52761 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52762 "Stats",
52763 - atomic_read(&osb->alloc_stats.bitmap_data),
52764 - atomic_read(&osb->alloc_stats.local_data),
52765 - atomic_read(&osb->alloc_stats.bg_allocs),
52766 - atomic_read(&osb->alloc_stats.moves),
52767 - atomic_read(&osb->alloc_stats.bg_extends));
52768 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52769 + atomic_read_unchecked(&osb->alloc_stats.local_data),
52770 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52771 + atomic_read_unchecked(&osb->alloc_stats.moves),
52772 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52773
52774 out += snprintf(buf + out, len - out,
52775 "%10s => State: %u Descriptor: %llu Size: %u bits "
52776 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52777 spin_lock_init(&osb->osb_xattr_lock);
52778 ocfs2_init_inode_steal_slot(osb);
52779
52780 - atomic_set(&osb->alloc_stats.moves, 0);
52781 - atomic_set(&osb->alloc_stats.local_data, 0);
52782 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
52783 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
52784 - atomic_set(&osb->alloc_stats.bg_extends, 0);
52785 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52786 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52787 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52788 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52789 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52790
52791 /* Copy the blockcheck stats from the superblock probe */
52792 osb->osb_ecc_stats = *stats;
52793 diff --git a/fs/open.c b/fs/open.c
52794 index 4f01e06..2a8057a 100644
52795 --- a/fs/open.c
52796 +++ b/fs/open.c
52797 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52798 error = locks_verify_truncate(inode, NULL, length);
52799 if (!error)
52800 error = security_path_truncate(&path, length, 0);
52801 +
52802 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52803 + error = -EACCES;
52804 +
52805 if (!error) {
52806 vfs_dq_init(inode);
52807 error = do_truncate(path.dentry, length, 0, NULL);
52808 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52809 if (__mnt_is_readonly(path.mnt))
52810 res = -EROFS;
52811
52812 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52813 + res = -EACCES;
52814 +
52815 out_path_release:
52816 path_put(&path);
52817 out:
52818 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52819 if (error)
52820 goto dput_and_out;
52821
52822 + gr_log_chdir(path.dentry, path.mnt);
52823 +
52824 set_fs_pwd(current->fs, &path);
52825
52826 dput_and_out:
52827 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52828 goto out_putf;
52829
52830 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52831 +
52832 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52833 + error = -EPERM;
52834 +
52835 + if (!error)
52836 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52837 +
52838 if (!error)
52839 set_fs_pwd(current->fs, &file->f_path);
52840 out_putf:
52841 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52842 if (!capable(CAP_SYS_CHROOT))
52843 goto dput_and_out;
52844
52845 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52846 + goto dput_and_out;
52847 +
52848 set_fs_root(current->fs, &path);
52849 +
52850 + gr_handle_chroot_chdir(&path);
52851 +
52852 error = 0;
52853 dput_and_out:
52854 path_put(&path);
52855 @@ -596,66 +618,57 @@ out:
52856 return error;
52857 }
52858
52859 +static int chmod_common(struct path *path, umode_t mode)
52860 +{
52861 + struct inode *inode = path->dentry->d_inode;
52862 + struct iattr newattrs;
52863 + int error;
52864 +
52865 + error = mnt_want_write(path->mnt);
52866 + if (error)
52867 + return error;
52868 + mutex_lock(&inode->i_mutex);
52869 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
52870 + error = -EACCES;
52871 + goto out_unlock;
52872 + }
52873 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
52874 + error = -EPERM;
52875 + goto out_unlock;
52876 + }
52877 + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52878 + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52879 + error = notify_change(path->dentry, &newattrs);
52880 +out_unlock:
52881 + mutex_unlock(&inode->i_mutex);
52882 + mnt_drop_write(path->mnt);
52883 + return error;
52884 +}
52885 +
52886 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52887 {
52888 - struct inode * inode;
52889 - struct dentry * dentry;
52890 struct file * file;
52891 int err = -EBADF;
52892 - struct iattr newattrs;
52893
52894 file = fget(fd);
52895 - if (!file)
52896 - goto out;
52897 -
52898 - dentry = file->f_path.dentry;
52899 - inode = dentry->d_inode;
52900 -
52901 - audit_inode(NULL, dentry);
52902 -
52903 - err = mnt_want_write_file(file);
52904 - if (err)
52905 - goto out_putf;
52906 - mutex_lock(&inode->i_mutex);
52907 - if (mode == (mode_t) -1)
52908 - mode = inode->i_mode;
52909 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52910 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52911 - err = notify_change(dentry, &newattrs);
52912 - mutex_unlock(&inode->i_mutex);
52913 - mnt_drop_write(file->f_path.mnt);
52914 -out_putf:
52915 - fput(file);
52916 -out:
52917 + if (file) {
52918 + audit_inode(NULL, file->f_path.dentry);
52919 + err = chmod_common(&file->f_path, mode);
52920 + fput(file);
52921 + }
52922 return err;
52923 }
52924
52925 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52926 {
52927 struct path path;
52928 - struct inode *inode;
52929 int error;
52930 - struct iattr newattrs;
52931
52932 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
52933 - if (error)
52934 - goto out;
52935 - inode = path.dentry->d_inode;
52936 -
52937 - error = mnt_want_write(path.mnt);
52938 - if (error)
52939 - goto dput_and_out;
52940 - mutex_lock(&inode->i_mutex);
52941 - if (mode == (mode_t) -1)
52942 - mode = inode->i_mode;
52943 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52944 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52945 - error = notify_change(path.dentry, &newattrs);
52946 - mutex_unlock(&inode->i_mutex);
52947 - mnt_drop_write(path.mnt);
52948 -dput_and_out:
52949 - path_put(&path);
52950 -out:
52951 + if (!error) {
52952 + error = chmod_common(&path, mode);
52953 + path_put(&path);
52954 + }
52955 return error;
52956 }
52957
52958 @@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52959 return sys_fchmodat(AT_FDCWD, filename, mode);
52960 }
52961
52962 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52963 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52964 {
52965 struct inode *inode = dentry->d_inode;
52966 int error;
52967 struct iattr newattrs;
52968
52969 + if (!gr_acl_handle_chown(dentry, mnt))
52970 + return -EACCES;
52971 +
52972 newattrs.ia_valid = ATTR_CTIME;
52973 if (user != (uid_t) -1) {
52974 newattrs.ia_valid |= ATTR_UID;
52975 @@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52976 error = mnt_want_write(path.mnt);
52977 if (error)
52978 goto out_release;
52979 - error = chown_common(path.dentry, user, group);
52980 + error = chown_common(path.dentry, user, group, path.mnt);
52981 mnt_drop_write(path.mnt);
52982 out_release:
52983 path_put(&path);
52984 @@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52985 error = mnt_want_write(path.mnt);
52986 if (error)
52987 goto out_release;
52988 - error = chown_common(path.dentry, user, group);
52989 + error = chown_common(path.dentry, user, group, path.mnt);
52990 mnt_drop_write(path.mnt);
52991 out_release:
52992 path_put(&path);
52993 @@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52994 error = mnt_want_write(path.mnt);
52995 if (error)
52996 goto out_release;
52997 - error = chown_common(path.dentry, user, group);
52998 + error = chown_common(path.dentry, user, group, path.mnt);
52999 mnt_drop_write(path.mnt);
53000 out_release:
53001 path_put(&path);
53002 @@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
53003 goto out_fput;
53004 dentry = file->f_path.dentry;
53005 audit_inode(NULL, dentry);
53006 - error = chown_common(dentry, user, group);
53007 + error = chown_common(dentry, user, group, file->f_path.mnt);
53008 mnt_drop_write(file->f_path.mnt);
53009 out_fput:
53010 fput(file);
53011 @@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
53012 if (!IS_ERR(tmp)) {
53013 fd = get_unused_fd_flags(flags);
53014 if (fd >= 0) {
53015 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
53016 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
53017 if (IS_ERR(f)) {
53018 put_unused_fd(fd);
53019 fd = PTR_ERR(f);
53020 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
53021 index 6ab70f4..f4103d1 100644
53022 --- a/fs/partitions/efi.c
53023 +++ b/fs/partitions/efi.c
53024 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
53025 if (!bdev || !gpt)
53026 return NULL;
53027
53028 + if (!le32_to_cpu(gpt->num_partition_entries))
53029 + return NULL;
53030 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
53031 + if (!pte)
53032 + return NULL;
53033 +
53034 count = le32_to_cpu(gpt->num_partition_entries) *
53035 le32_to_cpu(gpt->sizeof_partition_entry);
53036 - if (!count)
53037 - return NULL;
53038 - pte = kzalloc(count, GFP_KERNEL);
53039 - if (!pte)
53040 - return NULL;
53041 -
53042 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
53043 (u8 *) pte,
53044 count) < count) {
53045 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
53046 index dd6efdb..3babc6c 100644
53047 --- a/fs/partitions/ldm.c
53048 +++ b/fs/partitions/ldm.c
53049 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
53050 ldm_error ("A VBLK claims to have %d parts.", num);
53051 return false;
53052 }
53053 +
53054 if (rec >= num) {
53055 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
53056 return false;
53057 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
53058 goto found;
53059 }
53060
53061 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
53062 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
53063 if (!f) {
53064 ldm_crit ("Out of memory.");
53065 return false;
53066 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
53067 index 5765198..7f8e9e0 100644
53068 --- a/fs/partitions/mac.c
53069 +++ b/fs/partitions/mac.c
53070 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
53071 return 0; /* not a MacOS disk */
53072 }
53073 blocks_in_map = be32_to_cpu(part->map_count);
53074 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53075 - put_dev_sector(sect);
53076 - return 0;
53077 - }
53078 printk(" [mac]");
53079 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53080 + put_dev_sector(sect);
53081 + return 0;
53082 + }
53083 for (slot = 1; slot <= blocks_in_map; ++slot) {
53084 int pos = slot * secsize;
53085 put_dev_sector(sect);
53086 diff --git a/fs/pipe.c b/fs/pipe.c
53087 index d0cc080..8a6f211 100644
53088 --- a/fs/pipe.c
53089 +++ b/fs/pipe.c
53090 @@ -401,9 +401,9 @@ redo:
53091 }
53092 if (bufs) /* More to do? */
53093 continue;
53094 - if (!pipe->writers)
53095 + if (!atomic_read(&pipe->writers))
53096 break;
53097 - if (!pipe->waiting_writers) {
53098 + if (!atomic_read(&pipe->waiting_writers)) {
53099 /* syscall merging: Usually we must not sleep
53100 * if O_NONBLOCK is set, or if we got some data.
53101 * But if a writer sleeps in kernel space, then
53102 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53103 mutex_lock(&inode->i_mutex);
53104 pipe = inode->i_pipe;
53105
53106 - if (!pipe->readers) {
53107 + if (!atomic_read(&pipe->readers)) {
53108 send_sig(SIGPIPE, current, 0);
53109 ret = -EPIPE;
53110 goto out;
53111 @@ -511,7 +511,7 @@ redo1:
53112 for (;;) {
53113 int bufs;
53114
53115 - if (!pipe->readers) {
53116 + if (!atomic_read(&pipe->readers)) {
53117 send_sig(SIGPIPE, current, 0);
53118 if (!ret)
53119 ret = -EPIPE;
53120 @@ -597,9 +597,9 @@ redo2:
53121 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53122 do_wakeup = 0;
53123 }
53124 - pipe->waiting_writers++;
53125 + atomic_inc(&pipe->waiting_writers);
53126 pipe_wait(pipe);
53127 - pipe->waiting_writers--;
53128 + atomic_dec(&pipe->waiting_writers);
53129 }
53130 out:
53131 mutex_unlock(&inode->i_mutex);
53132 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53133 mask = 0;
53134 if (filp->f_mode & FMODE_READ) {
53135 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53136 - if (!pipe->writers && filp->f_version != pipe->w_counter)
53137 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53138 mask |= POLLHUP;
53139 }
53140
53141 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53142 * Most Unices do not set POLLERR for FIFOs but on Linux they
53143 * behave exactly like pipes for poll().
53144 */
53145 - if (!pipe->readers)
53146 + if (!atomic_read(&pipe->readers))
53147 mask |= POLLERR;
53148 }
53149
53150 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53151
53152 mutex_lock(&inode->i_mutex);
53153 pipe = inode->i_pipe;
53154 - pipe->readers -= decr;
53155 - pipe->writers -= decw;
53156 + atomic_sub(decr, &pipe->readers);
53157 + atomic_sub(decw, &pipe->writers);
53158
53159 - if (!pipe->readers && !pipe->writers) {
53160 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53161 free_pipe_info(inode);
53162 } else {
53163 wake_up_interruptible_sync(&pipe->wait);
53164 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53165
53166 if (inode->i_pipe) {
53167 ret = 0;
53168 - inode->i_pipe->readers++;
53169 + atomic_inc(&inode->i_pipe->readers);
53170 }
53171
53172 mutex_unlock(&inode->i_mutex);
53173 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53174
53175 if (inode->i_pipe) {
53176 ret = 0;
53177 - inode->i_pipe->writers++;
53178 + atomic_inc(&inode->i_pipe->writers);
53179 }
53180
53181 mutex_unlock(&inode->i_mutex);
53182 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53183 if (inode->i_pipe) {
53184 ret = 0;
53185 if (filp->f_mode & FMODE_READ)
53186 - inode->i_pipe->readers++;
53187 + atomic_inc(&inode->i_pipe->readers);
53188 if (filp->f_mode & FMODE_WRITE)
53189 - inode->i_pipe->writers++;
53190 + atomic_inc(&inode->i_pipe->writers);
53191 }
53192
53193 mutex_unlock(&inode->i_mutex);
53194 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
53195 inode->i_pipe = NULL;
53196 }
53197
53198 -static struct vfsmount *pipe_mnt __read_mostly;
53199 +struct vfsmount *pipe_mnt __read_mostly;
53200 static int pipefs_delete_dentry(struct dentry *dentry)
53201 {
53202 /*
53203 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
53204 goto fail_iput;
53205 inode->i_pipe = pipe;
53206
53207 - pipe->readers = pipe->writers = 1;
53208 + atomic_set(&pipe->readers, 1);
53209 + atomic_set(&pipe->writers, 1);
53210 inode->i_fop = &rdwr_pipefifo_fops;
53211
53212 /*
53213 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53214 index 50f8f06..c5755df 100644
53215 --- a/fs/proc/Kconfig
53216 +++ b/fs/proc/Kconfig
53217 @@ -30,12 +30,12 @@ config PROC_FS
53218
53219 config PROC_KCORE
53220 bool "/proc/kcore support" if !ARM
53221 - depends on PROC_FS && MMU
53222 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53223
53224 config PROC_VMCORE
53225 bool "/proc/vmcore support (EXPERIMENTAL)"
53226 - depends on PROC_FS && CRASH_DUMP
53227 - default y
53228 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53229 + default n
53230 help
53231 Exports the dump image of crashed kernel in ELF format.
53232
53233 @@ -59,8 +59,8 @@ config PROC_SYSCTL
53234 limited in memory.
53235
53236 config PROC_PAGE_MONITOR
53237 - default y
53238 - depends on PROC_FS && MMU
53239 + default n
53240 + depends on PROC_FS && MMU && !GRKERNSEC
53241 bool "Enable /proc page monitoring" if EMBEDDED
53242 help
53243 Various /proc files exist to monitor process memory utilization:
53244 diff --git a/fs/proc/array.c b/fs/proc/array.c
53245 index c5ef152..28c94f7 100644
53246 --- a/fs/proc/array.c
53247 +++ b/fs/proc/array.c
53248 @@ -60,6 +60,7 @@
53249 #include <linux/tty.h>
53250 #include <linux/string.h>
53251 #include <linux/mman.h>
53252 +#include <linux/grsecurity.h>
53253 #include <linux/proc_fs.h>
53254 #include <linux/ioport.h>
53255 #include <linux/uaccess.h>
53256 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53257 p->nivcsw);
53258 }
53259
53260 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53261 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
53262 +{
53263 + if (p->mm)
53264 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53265 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53266 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53267 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53268 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53269 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53270 + else
53271 + seq_printf(m, "PaX:\t-----\n");
53272 +}
53273 +#endif
53274 +
53275 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53276 struct pid *pid, struct task_struct *task)
53277 {
53278 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53279 task_cap(m, task);
53280 cpuset_task_status_allowed(m, task);
53281 task_context_switch_counts(m, task);
53282 +
53283 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53284 + task_pax(m, task);
53285 +#endif
53286 +
53287 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53288 + task_grsec_rbac(m, task);
53289 +#endif
53290 +
53291 return 0;
53292 }
53293
53294 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53295 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53296 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53297 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53298 +#endif
53299 +
53300 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53301 struct pid *pid, struct task_struct *task, int whole)
53302 {
53303 @@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53304 cputime_t cutime, cstime, utime, stime;
53305 cputime_t cgtime, gtime;
53306 unsigned long rsslim = 0;
53307 - char tcomm[sizeof(task->comm)];
53308 + char tcomm[sizeof(task->comm)] = { 0 };
53309 unsigned long flags;
53310
53311 + pax_track_stack();
53312 +
53313 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53314 + if (current->exec_id != m->exec_id) {
53315 + gr_log_badprocpid("stat");
53316 + return 0;
53317 + }
53318 +#endif
53319 +
53320 state = *get_task_state(task);
53321 vsize = eip = esp = 0;
53322 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53323 @@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53324 gtime = task_gtime(task);
53325 }
53326
53327 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53328 + if (PAX_RAND_FLAGS(mm)) {
53329 + eip = 0;
53330 + esp = 0;
53331 + wchan = 0;
53332 + }
53333 +#endif
53334 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53335 + wchan = 0;
53336 + eip =0;
53337 + esp =0;
53338 +#endif
53339 +
53340 /* scale priority and nice values from timeslices to -20..20 */
53341 /* to make it look like a "normal" Unix priority/nice value */
53342 priority = task_prio(task);
53343 @@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53344 vsize,
53345 mm ? get_mm_rss(mm) : 0,
53346 rsslim,
53347 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53348 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53349 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53350 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53351 +#else
53352 mm ? (permitted ? mm->start_code : 1) : 0,
53353 mm ? (permitted ? mm->end_code : 1) : 0,
53354 (permitted && mm) ? mm->start_stack : 0,
53355 +#endif
53356 esp,
53357 eip,
53358 /* The signal information here is obsolete.
53359 @@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53360 struct pid *pid, struct task_struct *task)
53361 {
53362 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53363 - struct mm_struct *mm = get_task_mm(task);
53364 + struct mm_struct *mm;
53365
53366 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53367 + if (current->exec_id != m->exec_id) {
53368 + gr_log_badprocpid("statm");
53369 + return 0;
53370 + }
53371 +#endif
53372 +
53373 + mm = get_task_mm(task);
53374 if (mm) {
53375 size = task_statm(mm, &shared, &text, &data, &resident);
53376 mmput(mm);
53377 @@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53378
53379 return 0;
53380 }
53381 +
53382 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53383 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53384 +{
53385 + u32 curr_ip = 0;
53386 + unsigned long flags;
53387 +
53388 + if (lock_task_sighand(task, &flags)) {
53389 + curr_ip = task->signal->curr_ip;
53390 + unlock_task_sighand(task, &flags);
53391 + }
53392 +
53393 + return sprintf(buffer, "%pI4\n", &curr_ip);
53394 +}
53395 +#endif
53396 diff --git a/fs/proc/base.c b/fs/proc/base.c
53397 index 67f7dc0..a86ad9a 100644
53398 --- a/fs/proc/base.c
53399 +++ b/fs/proc/base.c
53400 @@ -102,6 +102,22 @@ struct pid_entry {
53401 union proc_op op;
53402 };
53403
53404 +struct getdents_callback {
53405 + struct linux_dirent __user * current_dir;
53406 + struct linux_dirent __user * previous;
53407 + struct file * file;
53408 + int count;
53409 + int error;
53410 +};
53411 +
53412 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53413 + loff_t offset, u64 ino, unsigned int d_type)
53414 +{
53415 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
53416 + buf->error = -EINVAL;
53417 + return 0;
53418 +}
53419 +
53420 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53421 .name = (NAME), \
53422 .len = sizeof(NAME) - 1, \
53423 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53424 if (task == current)
53425 return 0;
53426
53427 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53428 + return -EPERM;
53429 +
53430 /*
53431 * If current is actively ptrace'ing, and would also be
53432 * permitted to freshly attach with ptrace now, permit it.
53433 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53434 if (!mm->arg_end)
53435 goto out_mm; /* Shh! No looking before we're done */
53436
53437 + if (gr_acl_handle_procpidmem(task))
53438 + goto out_mm;
53439 +
53440 len = mm->arg_end - mm->arg_start;
53441
53442 if (len > PAGE_SIZE)
53443 @@ -287,12 +309,28 @@ out:
53444 return res;
53445 }
53446
53447 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53448 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53449 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53450 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53451 +#endif
53452 +
53453 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53454 {
53455 int res = 0;
53456 struct mm_struct *mm = get_task_mm(task);
53457 if (mm) {
53458 unsigned int nwords = 0;
53459 +
53460 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53461 + /* allow if we're currently ptracing this task */
53462 + if (PAX_RAND_FLAGS(mm) &&
53463 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53464 + mmput(mm);
53465 + return 0;
53466 + }
53467 +#endif
53468 +
53469 do {
53470 nwords += 2;
53471 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53472 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53473 }
53474
53475
53476 -#ifdef CONFIG_KALLSYMS
53477 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53478 /*
53479 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53480 * Returns the resolved symbol. If that fails, simply return the address.
53481 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53482 mutex_unlock(&task->cred_guard_mutex);
53483 }
53484
53485 -#ifdef CONFIG_STACKTRACE
53486 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53487
53488 #define MAX_STACK_TRACE_DEPTH 64
53489
53490 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53491 return count;
53492 }
53493
53494 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53495 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53496 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53497 {
53498 long nr;
53499 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53500 /************************************************************************/
53501
53502 /* permission checks */
53503 -static int proc_fd_access_allowed(struct inode *inode)
53504 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53505 {
53506 struct task_struct *task;
53507 int allowed = 0;
53508 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53509 */
53510 task = get_proc_task(inode);
53511 if (task) {
53512 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53513 + if (log)
53514 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53515 + else
53516 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53517 put_task_struct(task);
53518 }
53519 return allowed;
53520 @@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
53521 static int mem_open(struct inode* inode, struct file* file)
53522 {
53523 file->private_data = (void*)((long)current->self_exec_id);
53524 +
53525 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53526 + file->f_version = current->exec_id;
53527 +#endif
53528 +
53529 return 0;
53530 }
53531
53532 +static int task_dumpable(struct task_struct *task);
53533 +
53534 static ssize_t mem_read(struct file * file, char __user * buf,
53535 size_t count, loff_t *ppos)
53536 {
53537 @@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53538 int ret = -ESRCH;
53539 struct mm_struct *mm;
53540
53541 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53542 + if (file->f_version != current->exec_id) {
53543 + gr_log_badprocpid("mem");
53544 + return 0;
53545 + }
53546 +#endif
53547 +
53548 if (!task)
53549 goto out_no_task;
53550
53551 @@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53552 if (!task)
53553 goto out_no_task;
53554
53555 + if (gr_acl_handle_procpidmem(task))
53556 + goto out;
53557 +
53558 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53559 goto out;
53560
53561 @@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53562 path_put(&nd->path);
53563
53564 /* Are we allowed to snoop on the tasks file descriptors? */
53565 - if (!proc_fd_access_allowed(inode))
53566 + if (!proc_fd_access_allowed(inode,0))
53567 goto out;
53568
53569 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53570 @@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53571 struct path path;
53572
53573 /* Are we allowed to snoop on the tasks file descriptors? */
53574 - if (!proc_fd_access_allowed(inode))
53575 - goto out;
53576 + /* logging this is needed for learning on chromium to work properly,
53577 + but we don't want to flood the logs from 'ps' which does a readlink
53578 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53579 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
53580 + */
53581 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53582 + if (!proc_fd_access_allowed(inode,0))
53583 + goto out;
53584 + } else {
53585 + if (!proc_fd_access_allowed(inode,1))
53586 + goto out;
53587 + }
53588
53589 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53590 if (error)
53591 @@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53592 rcu_read_lock();
53593 cred = __task_cred(task);
53594 inode->i_uid = cred->euid;
53595 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53596 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53597 +#else
53598 inode->i_gid = cred->egid;
53599 +#endif
53600 rcu_read_unlock();
53601 }
53602 security_task_to_inode(task, inode);
53603 @@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53604 struct inode *inode = dentry->d_inode;
53605 struct task_struct *task;
53606 const struct cred *cred;
53607 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53608 + const struct cred *tmpcred = current_cred();
53609 +#endif
53610
53611 generic_fillattr(inode, stat);
53612
53613 @@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53614 stat->uid = 0;
53615 stat->gid = 0;
53616 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53617 +
53618 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53619 + rcu_read_unlock();
53620 + return -ENOENT;
53621 + }
53622 +
53623 if (task) {
53624 + cred = __task_cred(task);
53625 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53626 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53627 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53628 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53629 +#endif
53630 + ) {
53631 +#endif
53632 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53633 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53634 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53635 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53636 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53637 +#endif
53638 task_dumpable(task)) {
53639 - cred = __task_cred(task);
53640 stat->uid = cred->euid;
53641 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53642 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53643 +#else
53644 stat->gid = cred->egid;
53645 +#endif
53646 }
53647 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53648 + } else {
53649 + rcu_read_unlock();
53650 + return -ENOENT;
53651 + }
53652 +#endif
53653 }
53654 rcu_read_unlock();
53655 return 0;
53656 @@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53657
53658 if (task) {
53659 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53660 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53661 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53662 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53663 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53664 +#endif
53665 task_dumpable(task)) {
53666 rcu_read_lock();
53667 cred = __task_cred(task);
53668 inode->i_uid = cred->euid;
53669 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53670 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53671 +#else
53672 inode->i_gid = cred->egid;
53673 +#endif
53674 rcu_read_unlock();
53675 } else {
53676 inode->i_uid = 0;
53677 @@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53678 int fd = proc_fd(inode);
53679
53680 if (task) {
53681 - files = get_files_struct(task);
53682 + if (!gr_acl_handle_procpidmem(task))
53683 + files = get_files_struct(task);
53684 put_task_struct(task);
53685 }
53686 if (files) {
53687 @@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
53688 static int proc_fd_permission(struct inode *inode, int mask)
53689 {
53690 int rv;
53691 + struct task_struct *task;
53692
53693 rv = generic_permission(inode, mask, NULL);
53694 - if (rv == 0)
53695 - return 0;
53696 +
53697 if (task_pid(current) == proc_pid(inode))
53698 rv = 0;
53699 +
53700 + task = get_proc_task(inode);
53701 + if (task == NULL)
53702 + return rv;
53703 +
53704 + if (gr_acl_handle_procpidmem(task))
53705 + rv = -EACCES;
53706 +
53707 + put_task_struct(task);
53708 +
53709 return rv;
53710 }
53711
53712 @@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53713 if (!task)
53714 goto out_no_task;
53715
53716 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53717 + goto out;
53718 +
53719 /*
53720 * Yes, it does not scale. And it should not. Don't add
53721 * new entries into /proc/<tgid>/ without very good reasons.
53722 @@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
53723 if (!task)
53724 goto out_no_task;
53725
53726 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53727 + goto out;
53728 +
53729 ret = 0;
53730 i = filp->f_pos;
53731 switch (i) {
53732 @@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53733 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53734 void *cookie)
53735 {
53736 - char *s = nd_get_link(nd);
53737 + const char *s = nd_get_link(nd);
53738 if (!IS_ERR(s))
53739 __putname(s);
53740 }
53741 @@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53742 #ifdef CONFIG_SCHED_DEBUG
53743 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53744 #endif
53745 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53746 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53747 INF("syscall", S_IRUGO, proc_pid_syscall),
53748 #endif
53749 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53750 @@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53751 #ifdef CONFIG_SECURITY
53752 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53753 #endif
53754 -#ifdef CONFIG_KALLSYMS
53755 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53756 INF("wchan", S_IRUGO, proc_pid_wchan),
53757 #endif
53758 -#ifdef CONFIG_STACKTRACE
53759 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53760 ONE("stack", S_IRUGO, proc_pid_stack),
53761 #endif
53762 #ifdef CONFIG_SCHEDSTATS
53763 @@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53764 #ifdef CONFIG_TASK_IO_ACCOUNTING
53765 INF("io", S_IRUSR, proc_tgid_io_accounting),
53766 #endif
53767 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53768 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53769 +#endif
53770 };
53771
53772 static int proc_tgid_base_readdir(struct file * filp,
53773 @@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53774 if (!inode)
53775 goto out;
53776
53777 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53778 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53779 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53780 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53781 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53782 +#else
53783 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53784 +#endif
53785 inode->i_op = &proc_tgid_base_inode_operations;
53786 inode->i_fop = &proc_tgid_base_operations;
53787 inode->i_flags|=S_IMMUTABLE;
53788 @@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53789 if (!task)
53790 goto out;
53791
53792 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53793 + goto out_put_task;
53794 +
53795 result = proc_pid_instantiate(dir, dentry, task, NULL);
53796 +out_put_task:
53797 put_task_struct(task);
53798 out:
53799 return result;
53800 @@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53801 {
53802 unsigned int nr;
53803 struct task_struct *reaper;
53804 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53805 + const struct cred *tmpcred = current_cred();
53806 + const struct cred *itercred;
53807 +#endif
53808 + filldir_t __filldir = filldir;
53809 struct tgid_iter iter;
53810 struct pid_namespace *ns;
53811
53812 @@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53813 for (iter = next_tgid(ns, iter);
53814 iter.task;
53815 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53816 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53817 + rcu_read_lock();
53818 + itercred = __task_cred(iter.task);
53819 +#endif
53820 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53821 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53822 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53823 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53824 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53825 +#endif
53826 + )
53827 +#endif
53828 + )
53829 + __filldir = &gr_fake_filldir;
53830 + else
53831 + __filldir = filldir;
53832 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53833 + rcu_read_unlock();
53834 +#endif
53835 filp->f_pos = iter.tgid + TGID_OFFSET;
53836 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53837 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53838 put_task_struct(iter.task);
53839 goto out;
53840 }
53841 @@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
53842 #ifdef CONFIG_SCHED_DEBUG
53843 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53844 #endif
53845 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53846 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53847 INF("syscall", S_IRUGO, proc_pid_syscall),
53848 #endif
53849 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53850 @@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
53851 #ifdef CONFIG_SECURITY
53852 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53853 #endif
53854 -#ifdef CONFIG_KALLSYMS
53855 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53856 INF("wchan", S_IRUGO, proc_pid_wchan),
53857 #endif
53858 -#ifdef CONFIG_STACKTRACE
53859 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53860 ONE("stack", S_IRUGO, proc_pid_stack),
53861 #endif
53862 #ifdef CONFIG_SCHEDSTATS
53863 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53864 index 82676e3..5f8518a 100644
53865 --- a/fs/proc/cmdline.c
53866 +++ b/fs/proc/cmdline.c
53867 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53868
53869 static int __init proc_cmdline_init(void)
53870 {
53871 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53872 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53873 +#else
53874 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53875 +#endif
53876 return 0;
53877 }
53878 module_init(proc_cmdline_init);
53879 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53880 index 59ee7da..469b4b6 100644
53881 --- a/fs/proc/devices.c
53882 +++ b/fs/proc/devices.c
53883 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53884
53885 static int __init proc_devices_init(void)
53886 {
53887 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53888 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53889 +#else
53890 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53891 +#endif
53892 return 0;
53893 }
53894 module_init(proc_devices_init);
53895 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53896 index d78ade3..81767f9 100644
53897 --- a/fs/proc/inode.c
53898 +++ b/fs/proc/inode.c
53899 @@ -18,12 +18,19 @@
53900 #include <linux/module.h>
53901 #include <linux/smp_lock.h>
53902 #include <linux/sysctl.h>
53903 +#include <linux/grsecurity.h>
53904
53905 #include <asm/system.h>
53906 #include <asm/uaccess.h>
53907
53908 #include "internal.h"
53909
53910 +#ifdef CONFIG_PROC_SYSCTL
53911 +extern const struct inode_operations proc_sys_inode_operations;
53912 +extern const struct inode_operations proc_sys_dir_operations;
53913 +#endif
53914 +
53915 +
53916 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53917 {
53918 atomic_inc(&de->count);
53919 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53920 de_put(de);
53921 if (PROC_I(inode)->sysctl)
53922 sysctl_head_put(PROC_I(inode)->sysctl);
53923 +
53924 +#ifdef CONFIG_PROC_SYSCTL
53925 + if (inode->i_op == &proc_sys_inode_operations ||
53926 + inode->i_op == &proc_sys_dir_operations)
53927 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53928 +#endif
53929 +
53930 clear_inode(inode);
53931 }
53932
53933 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53934 if (de->mode) {
53935 inode->i_mode = de->mode;
53936 inode->i_uid = de->uid;
53937 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53938 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53939 +#else
53940 inode->i_gid = de->gid;
53941 +#endif
53942 }
53943 if (de->size)
53944 inode->i_size = de->size;
53945 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53946 index 753ca37..26bcf3b 100644
53947 --- a/fs/proc/internal.h
53948 +++ b/fs/proc/internal.h
53949 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53950 struct pid *pid, struct task_struct *task);
53951 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53952 struct pid *pid, struct task_struct *task);
53953 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53954 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53955 +#endif
53956 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53957
53958 extern const struct file_operations proc_maps_operations;
53959 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53960 index b442dac..aab29cb 100644
53961 --- a/fs/proc/kcore.c
53962 +++ b/fs/proc/kcore.c
53963 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53964 off_t offset = 0;
53965 struct kcore_list *m;
53966
53967 + pax_track_stack();
53968 +
53969 /* setup ELF header */
53970 elf = (struct elfhdr *) bufp;
53971 bufp += sizeof(struct elfhdr);
53972 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53973 * the addresses in the elf_phdr on our list.
53974 */
53975 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53976 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53977 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53978 + if (tsz > buflen)
53979 tsz = buflen;
53980 -
53981 +
53982 while (buflen) {
53983 struct kcore_list *m;
53984
53985 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53986 kfree(elf_buf);
53987 } else {
53988 if (kern_addr_valid(start)) {
53989 - unsigned long n;
53990 + char *elf_buf;
53991 + mm_segment_t oldfs;
53992
53993 - n = copy_to_user(buffer, (char *)start, tsz);
53994 - /*
53995 - * We cannot distingush between fault on source
53996 - * and fault on destination. When this happens
53997 - * we clear too and hope it will trigger the
53998 - * EFAULT again.
53999 - */
54000 - if (n) {
54001 - if (clear_user(buffer + tsz - n,
54002 - n))
54003 + elf_buf = kmalloc(tsz, GFP_KERNEL);
54004 + if (!elf_buf)
54005 + return -ENOMEM;
54006 + oldfs = get_fs();
54007 + set_fs(KERNEL_DS);
54008 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
54009 + set_fs(oldfs);
54010 + if (copy_to_user(buffer, elf_buf, tsz)) {
54011 + kfree(elf_buf);
54012 return -EFAULT;
54013 + }
54014 }
54015 + set_fs(oldfs);
54016 + kfree(elf_buf);
54017 } else {
54018 if (clear_user(buffer, tsz))
54019 return -EFAULT;
54020 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54021
54022 static int open_kcore(struct inode *inode, struct file *filp)
54023 {
54024 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
54025 + return -EPERM;
54026 +#endif
54027 if (!capable(CAP_SYS_RAWIO))
54028 return -EPERM;
54029 if (kcore_need_update)
54030 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
54031 index 7ca7834..cfe90a4 100644
54032 --- a/fs/proc/kmsg.c
54033 +++ b/fs/proc/kmsg.c
54034 @@ -12,37 +12,37 @@
54035 #include <linux/poll.h>
54036 #include <linux/proc_fs.h>
54037 #include <linux/fs.h>
54038 +#include <linux/syslog.h>
54039
54040 #include <asm/uaccess.h>
54041 #include <asm/io.h>
54042
54043 extern wait_queue_head_t log_wait;
54044
54045 -extern int do_syslog(int type, char __user *bug, int count);
54046 -
54047 static int kmsg_open(struct inode * inode, struct file * file)
54048 {
54049 - return do_syslog(1,NULL,0);
54050 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
54051 }
54052
54053 static int kmsg_release(struct inode * inode, struct file * file)
54054 {
54055 - (void) do_syslog(0,NULL,0);
54056 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
54057 return 0;
54058 }
54059
54060 static ssize_t kmsg_read(struct file *file, char __user *buf,
54061 size_t count, loff_t *ppos)
54062 {
54063 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
54064 + if ((file->f_flags & O_NONBLOCK) &&
54065 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54066 return -EAGAIN;
54067 - return do_syslog(2, buf, count);
54068 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
54069 }
54070
54071 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
54072 {
54073 poll_wait(file, &log_wait, wait);
54074 - if (do_syslog(9, NULL, 0))
54075 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54076 return POLLIN | POLLRDNORM;
54077 return 0;
54078 }
54079 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54080 index a65239c..ad1182a 100644
54081 --- a/fs/proc/meminfo.c
54082 +++ b/fs/proc/meminfo.c
54083 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54084 unsigned long pages[NR_LRU_LISTS];
54085 int lru;
54086
54087 + pax_track_stack();
54088 +
54089 /*
54090 * display in kilobytes.
54091 */
54092 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54093 vmi.used >> 10,
54094 vmi.largest_chunk >> 10
54095 #ifdef CONFIG_MEMORY_FAILURE
54096 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
54097 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
54098 #endif
54099 );
54100
54101 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54102 index 9fe7d7e..cdb62c9 100644
54103 --- a/fs/proc/nommu.c
54104 +++ b/fs/proc/nommu.c
54105 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54106 if (len < 1)
54107 len = 1;
54108 seq_printf(m, "%*c", len, ' ');
54109 - seq_path(m, &file->f_path, "");
54110 + seq_path(m, &file->f_path, "\n\\");
54111 }
54112
54113 seq_putc(m, '\n');
54114 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54115 index 04d1270..25e1173 100644
54116 --- a/fs/proc/proc_net.c
54117 +++ b/fs/proc/proc_net.c
54118 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54119 struct task_struct *task;
54120 struct nsproxy *ns;
54121 struct net *net = NULL;
54122 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54123 + const struct cred *cred = current_cred();
54124 +#endif
54125 +
54126 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54127 + if (cred->fsuid)
54128 + return net;
54129 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54130 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
54131 + return net;
54132 +#endif
54133
54134 rcu_read_lock();
54135 task = pid_task(proc_pid(dir), PIDTYPE_PID);
54136 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54137 index f667e8a..55f4d96 100644
54138 --- a/fs/proc/proc_sysctl.c
54139 +++ b/fs/proc/proc_sysctl.c
54140 @@ -7,11 +7,13 @@
54141 #include <linux/security.h>
54142 #include "internal.h"
54143
54144 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
54145 +
54146 static const struct dentry_operations proc_sys_dentry_operations;
54147 static const struct file_operations proc_sys_file_operations;
54148 -static const struct inode_operations proc_sys_inode_operations;
54149 +const struct inode_operations proc_sys_inode_operations;
54150 static const struct file_operations proc_sys_dir_file_operations;
54151 -static const struct inode_operations proc_sys_dir_operations;
54152 +const struct inode_operations proc_sys_dir_operations;
54153
54154 static struct inode *proc_sys_make_inode(struct super_block *sb,
54155 struct ctl_table_header *head, struct ctl_table *table)
54156 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54157 if (!p)
54158 goto out;
54159
54160 + if (gr_handle_sysctl(p, MAY_EXEC))
54161 + goto out;
54162 +
54163 err = ERR_PTR(-ENOMEM);
54164 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
54165 if (h)
54166 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54167
54168 err = NULL;
54169 dentry->d_op = &proc_sys_dentry_operations;
54170 +
54171 + gr_handle_proc_create(dentry, inode);
54172 +
54173 d_add(dentry, inode);
54174
54175 out:
54176 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54177 return -ENOMEM;
54178 } else {
54179 child->d_op = &proc_sys_dentry_operations;
54180 +
54181 + gr_handle_proc_create(child, inode);
54182 +
54183 d_add(child, inode);
54184 }
54185 } else {
54186 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54187 if (*pos < file->f_pos)
54188 continue;
54189
54190 + if (gr_handle_sysctl(table, 0))
54191 + continue;
54192 +
54193 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
54194 if (res)
54195 return res;
54196 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54197 if (IS_ERR(head))
54198 return PTR_ERR(head);
54199
54200 + if (table && gr_handle_sysctl(table, MAY_EXEC))
54201 + return -ENOENT;
54202 +
54203 generic_fillattr(inode, stat);
54204 if (table)
54205 stat->mode = (stat->mode & S_IFMT) | table->mode;
54206 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
54207 };
54208
54209 static const struct file_operations proc_sys_dir_file_operations = {
54210 + .read = generic_read_dir,
54211 .readdir = proc_sys_readdir,
54212 .llseek = generic_file_llseek,
54213 };
54214
54215 -static const struct inode_operations proc_sys_inode_operations = {
54216 +const struct inode_operations proc_sys_inode_operations = {
54217 .permission = proc_sys_permission,
54218 .setattr = proc_sys_setattr,
54219 .getattr = proc_sys_getattr,
54220 };
54221
54222 -static const struct inode_operations proc_sys_dir_operations = {
54223 +const struct inode_operations proc_sys_dir_operations = {
54224 .lookup = proc_sys_lookup,
54225 .permission = proc_sys_permission,
54226 .setattr = proc_sys_setattr,
54227 diff --git a/fs/proc/root.c b/fs/proc/root.c
54228 index b080b79..d957e63 100644
54229 --- a/fs/proc/root.c
54230 +++ b/fs/proc/root.c
54231 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
54232 #ifdef CONFIG_PROC_DEVICETREE
54233 proc_device_tree_init();
54234 #endif
54235 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54236 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54237 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54238 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54239 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54240 +#endif
54241 +#else
54242 proc_mkdir("bus", NULL);
54243 +#endif
54244 proc_sys_init();
54245 }
54246
54247 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54248 index 3b7b82a..4b420b0 100644
54249 --- a/fs/proc/task_mmu.c
54250 +++ b/fs/proc/task_mmu.c
54251 @@ -8,6 +8,7 @@
54252 #include <linux/mempolicy.h>
54253 #include <linux/swap.h>
54254 #include <linux/swapops.h>
54255 +#include <linux/grsecurity.h>
54256
54257 #include <asm/elf.h>
54258 #include <asm/uaccess.h>
54259 @@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54260 "VmStk:\t%8lu kB\n"
54261 "VmExe:\t%8lu kB\n"
54262 "VmLib:\t%8lu kB\n"
54263 - "VmPTE:\t%8lu kB\n",
54264 - hiwater_vm << (PAGE_SHIFT-10),
54265 + "VmPTE:\t%8lu kB\n"
54266 +
54267 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54268 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54269 +#endif
54270 +
54271 + ,hiwater_vm << (PAGE_SHIFT-10),
54272 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54273 mm->locked_vm << (PAGE_SHIFT-10),
54274 hiwater_rss << (PAGE_SHIFT-10),
54275 total_rss << (PAGE_SHIFT-10),
54276 data << (PAGE_SHIFT-10),
54277 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54278 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54279 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54280 +
54281 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54282 + , mm->context.user_cs_base, mm->context.user_cs_limit
54283 +#endif
54284 +
54285 + );
54286 }
54287
54288 unsigned long task_vsize(struct mm_struct *mm)
54289 @@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54290 struct proc_maps_private *priv = m->private;
54291 struct vm_area_struct *vma = v;
54292
54293 - vma_stop(priv, vma);
54294 + if (!IS_ERR(vma))
54295 + vma_stop(priv, vma);
54296 if (priv->task)
54297 put_task_struct(priv->task);
54298 }
54299 @@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54300 return ret;
54301 }
54302
54303 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54304 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54305 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54306 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54307 +#endif
54308 +
54309 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54310 {
54311 struct mm_struct *mm = vma->vm_mm;
54312 @@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54313 int flags = vma->vm_flags;
54314 unsigned long ino = 0;
54315 unsigned long long pgoff = 0;
54316 - unsigned long start;
54317 dev_t dev = 0;
54318 int len;
54319
54320 @@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54321 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54322 }
54323
54324 - /* We don't show the stack guard page in /proc/maps */
54325 - start = vma->vm_start;
54326 - if (vma->vm_flags & VM_GROWSDOWN)
54327 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54328 - start += PAGE_SIZE;
54329 -
54330 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54331 - start,
54332 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54333 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54334 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54335 +#else
54336 + vma->vm_start,
54337 vma->vm_end,
54338 +#endif
54339 flags & VM_READ ? 'r' : '-',
54340 flags & VM_WRITE ? 'w' : '-',
54341 flags & VM_EXEC ? 'x' : '-',
54342 flags & VM_MAYSHARE ? 's' : 'p',
54343 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54344 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54345 +#else
54346 pgoff,
54347 +#endif
54348 MAJOR(dev), MINOR(dev), ino, &len);
54349
54350 /*
54351 @@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54352 */
54353 if (file) {
54354 pad_len_spaces(m, len);
54355 - seq_path(m, &file->f_path, "\n");
54356 + seq_path(m, &file->f_path, "\n\\");
54357 } else {
54358 const char *name = arch_vma_name(vma);
54359 if (!name) {
54360 @@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54361 if (vma->vm_start <= mm->brk &&
54362 vma->vm_end >= mm->start_brk) {
54363 name = "[heap]";
54364 - } else if (vma->vm_start <= mm->start_stack &&
54365 - vma->vm_end >= mm->start_stack) {
54366 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54367 + (vma->vm_start <= mm->start_stack &&
54368 + vma->vm_end >= mm->start_stack)) {
54369 name = "[stack]";
54370 }
54371 } else {
54372 @@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54373 struct proc_maps_private *priv = m->private;
54374 struct task_struct *task = priv->task;
54375
54376 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54377 + if (current->exec_id != m->exec_id) {
54378 + gr_log_badprocpid("maps");
54379 + return 0;
54380 + }
54381 +#endif
54382 +
54383 show_map_vma(m, vma);
54384
54385 if (m->count < m->size) /* vma is copied successfully */
54386 @@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54387 .private = &mss,
54388 };
54389
54390 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54391 + if (current->exec_id != m->exec_id) {
54392 + gr_log_badprocpid("smaps");
54393 + return 0;
54394 + }
54395 +#endif
54396 memset(&mss, 0, sizeof mss);
54397 - mss.vma = vma;
54398 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54399 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54400 +
54401 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54402 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54403 +#endif
54404 + mss.vma = vma;
54405 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54406 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54407 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54408 + }
54409 +#endif
54410
54411 show_map_vma(m, vma);
54412
54413 @@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54414 "Swap: %8lu kB\n"
54415 "KernelPageSize: %8lu kB\n"
54416 "MMUPageSize: %8lu kB\n",
54417 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54418 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54419 +#else
54420 (vma->vm_end - vma->vm_start) >> 10,
54421 +#endif
54422 mss.resident >> 10,
54423 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54424 mss.shared_clean >> 10,
54425 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54426 index 8f5c05d..c99c76d 100644
54427 --- a/fs/proc/task_nommu.c
54428 +++ b/fs/proc/task_nommu.c
54429 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54430 else
54431 bytes += kobjsize(mm);
54432
54433 - if (current->fs && current->fs->users > 1)
54434 + if (current->fs && atomic_read(&current->fs->users) > 1)
54435 sbytes += kobjsize(current->fs);
54436 else
54437 bytes += kobjsize(current->fs);
54438 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54439 if (len < 1)
54440 len = 1;
54441 seq_printf(m, "%*c", len, ' ');
54442 - seq_path(m, &file->f_path, "");
54443 + seq_path(m, &file->f_path, "\n\\");
54444 }
54445
54446 seq_putc(m, '\n');
54447 diff --git a/fs/readdir.c b/fs/readdir.c
54448 index 7723401..30059a6 100644
54449 --- a/fs/readdir.c
54450 +++ b/fs/readdir.c
54451 @@ -16,6 +16,7 @@
54452 #include <linux/security.h>
54453 #include <linux/syscalls.h>
54454 #include <linux/unistd.h>
54455 +#include <linux/namei.h>
54456
54457 #include <asm/uaccess.h>
54458
54459 @@ -67,6 +68,7 @@ struct old_linux_dirent {
54460
54461 struct readdir_callback {
54462 struct old_linux_dirent __user * dirent;
54463 + struct file * file;
54464 int result;
54465 };
54466
54467 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54468 buf->result = -EOVERFLOW;
54469 return -EOVERFLOW;
54470 }
54471 +
54472 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54473 + return 0;
54474 +
54475 buf->result++;
54476 dirent = buf->dirent;
54477 if (!access_ok(VERIFY_WRITE, dirent,
54478 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54479
54480 buf.result = 0;
54481 buf.dirent = dirent;
54482 + buf.file = file;
54483
54484 error = vfs_readdir(file, fillonedir, &buf);
54485 if (buf.result)
54486 @@ -142,6 +149,7 @@ struct linux_dirent {
54487 struct getdents_callback {
54488 struct linux_dirent __user * current_dir;
54489 struct linux_dirent __user * previous;
54490 + struct file * file;
54491 int count;
54492 int error;
54493 };
54494 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54495 buf->error = -EOVERFLOW;
54496 return -EOVERFLOW;
54497 }
54498 +
54499 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54500 + return 0;
54501 +
54502 dirent = buf->previous;
54503 if (dirent) {
54504 if (__put_user(offset, &dirent->d_off))
54505 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54506 buf.previous = NULL;
54507 buf.count = count;
54508 buf.error = 0;
54509 + buf.file = file;
54510
54511 error = vfs_readdir(file, filldir, &buf);
54512 if (error >= 0)
54513 @@ -228,6 +241,7 @@ out:
54514 struct getdents_callback64 {
54515 struct linux_dirent64 __user * current_dir;
54516 struct linux_dirent64 __user * previous;
54517 + struct file *file;
54518 int count;
54519 int error;
54520 };
54521 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54522 buf->error = -EINVAL; /* only used if we fail.. */
54523 if (reclen > buf->count)
54524 return -EINVAL;
54525 +
54526 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54527 + return 0;
54528 +
54529 dirent = buf->previous;
54530 if (dirent) {
54531 if (__put_user(offset, &dirent->d_off))
54532 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54533
54534 buf.current_dir = dirent;
54535 buf.previous = NULL;
54536 + buf.file = file;
54537 buf.count = count;
54538 buf.error = 0;
54539
54540 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54541 error = buf.error;
54542 lastdirent = buf.previous;
54543 if (lastdirent) {
54544 - typeof(lastdirent->d_off) d_off = file->f_pos;
54545 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54546 if (__put_user(d_off, &lastdirent->d_off))
54547 error = -EFAULT;
54548 else
54549 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54550 index d42c30c..4fd8718 100644
54551 --- a/fs/reiserfs/dir.c
54552 +++ b/fs/reiserfs/dir.c
54553 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54554 struct reiserfs_dir_entry de;
54555 int ret = 0;
54556
54557 + pax_track_stack();
54558 +
54559 reiserfs_write_lock(inode->i_sb);
54560
54561 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54562 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54563 index 128d3f7..8840d44 100644
54564 --- a/fs/reiserfs/do_balan.c
54565 +++ b/fs/reiserfs/do_balan.c
54566 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54567 return;
54568 }
54569
54570 - atomic_inc(&(fs_generation(tb->tb_sb)));
54571 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54572 do_balance_starts(tb);
54573
54574 /* balance leaf returns 0 except if combining L R and S into
54575 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54576 index 72cb1cc..d0e3181 100644
54577 --- a/fs/reiserfs/item_ops.c
54578 +++ b/fs/reiserfs/item_ops.c
54579 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54580 vi->vi_index, vi->vi_type, vi->vi_ih);
54581 }
54582
54583 -static struct item_operations stat_data_ops = {
54584 +static const struct item_operations stat_data_ops = {
54585 .bytes_number = sd_bytes_number,
54586 .decrement_key = sd_decrement_key,
54587 .is_left_mergeable = sd_is_left_mergeable,
54588 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54589 vi->vi_index, vi->vi_type, vi->vi_ih);
54590 }
54591
54592 -static struct item_operations direct_ops = {
54593 +static const struct item_operations direct_ops = {
54594 .bytes_number = direct_bytes_number,
54595 .decrement_key = direct_decrement_key,
54596 .is_left_mergeable = direct_is_left_mergeable,
54597 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54598 vi->vi_index, vi->vi_type, vi->vi_ih);
54599 }
54600
54601 -static struct item_operations indirect_ops = {
54602 +static const struct item_operations indirect_ops = {
54603 .bytes_number = indirect_bytes_number,
54604 .decrement_key = indirect_decrement_key,
54605 .is_left_mergeable = indirect_is_left_mergeable,
54606 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54607 printk("\n");
54608 }
54609
54610 -static struct item_operations direntry_ops = {
54611 +static const struct item_operations direntry_ops = {
54612 .bytes_number = direntry_bytes_number,
54613 .decrement_key = direntry_decrement_key,
54614 .is_left_mergeable = direntry_is_left_mergeable,
54615 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54616 "Invalid item type observed, run fsck ASAP");
54617 }
54618
54619 -static struct item_operations errcatch_ops = {
54620 +static const struct item_operations errcatch_ops = {
54621 errcatch_bytes_number,
54622 errcatch_decrement_key,
54623 errcatch_is_left_mergeable,
54624 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54625 #error Item types must use disk-format assigned values.
54626 #endif
54627
54628 -struct item_operations *item_ops[TYPE_ANY + 1] = {
54629 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54630 &stat_data_ops,
54631 &indirect_ops,
54632 &direct_ops,
54633 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54634 index b5fe0aa..e0e25c4 100644
54635 --- a/fs/reiserfs/journal.c
54636 +++ b/fs/reiserfs/journal.c
54637 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54638 struct buffer_head *bh;
54639 int i, j;
54640
54641 + pax_track_stack();
54642 +
54643 bh = __getblk(dev, block, bufsize);
54644 if (buffer_uptodate(bh))
54645 return (bh);
54646 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54647 index 2715791..b8996db 100644
54648 --- a/fs/reiserfs/namei.c
54649 +++ b/fs/reiserfs/namei.c
54650 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54651 unsigned long savelink = 1;
54652 struct timespec ctime;
54653
54654 + pax_track_stack();
54655 +
54656 /* three balancings: (1) old name removal, (2) new name insertion
54657 and (3) maybe "save" link insertion
54658 stat data updates: (1) old directory,
54659 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54660 index 9229e55..3d2e3b7 100644
54661 --- a/fs/reiserfs/procfs.c
54662 +++ b/fs/reiserfs/procfs.c
54663 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54664 "SMALL_TAILS " : "NO_TAILS ",
54665 replay_only(sb) ? "REPLAY_ONLY " : "",
54666 convert_reiserfs(sb) ? "CONV " : "",
54667 - atomic_read(&r->s_generation_counter),
54668 + atomic_read_unchecked(&r->s_generation_counter),
54669 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54670 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54671 SF(s_good_search_by_key_reada), SF(s_bmaps),
54672 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54673 struct journal_params *jp = &rs->s_v1.s_journal;
54674 char b[BDEVNAME_SIZE];
54675
54676 + pax_track_stack();
54677 +
54678 seq_printf(m, /* on-disk fields */
54679 "jp_journal_1st_block: \t%i\n"
54680 "jp_journal_dev: \t%s[%x]\n"
54681 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54682 index d036ee5..4c7dca1 100644
54683 --- a/fs/reiserfs/stree.c
54684 +++ b/fs/reiserfs/stree.c
54685 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54686 int iter = 0;
54687 #endif
54688
54689 + pax_track_stack();
54690 +
54691 BUG_ON(!th->t_trans_id);
54692
54693 init_tb_struct(th, &s_del_balance, sb, path,
54694 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54695 int retval;
54696 int quota_cut_bytes = 0;
54697
54698 + pax_track_stack();
54699 +
54700 BUG_ON(!th->t_trans_id);
54701
54702 le_key2cpu_key(&cpu_key, key);
54703 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54704 int quota_cut_bytes;
54705 loff_t tail_pos = 0;
54706
54707 + pax_track_stack();
54708 +
54709 BUG_ON(!th->t_trans_id);
54710
54711 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54712 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54713 int retval;
54714 int fs_gen;
54715
54716 + pax_track_stack();
54717 +
54718 BUG_ON(!th->t_trans_id);
54719
54720 fs_gen = get_generation(inode->i_sb);
54721 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54722 int fs_gen = 0;
54723 int quota_bytes = 0;
54724
54725 + pax_track_stack();
54726 +
54727 BUG_ON(!th->t_trans_id);
54728
54729 if (inode) { /* Do we count quotas for item? */
54730 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54731 index 7cb1285..c726cd0 100644
54732 --- a/fs/reiserfs/super.c
54733 +++ b/fs/reiserfs/super.c
54734 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54735 {.option_name = NULL}
54736 };
54737
54738 + pax_track_stack();
54739 +
54740 *blocks = 0;
54741 if (!options || !*options)
54742 /* use default configuration: create tails, journaling on, no
54743 diff --git a/fs/select.c b/fs/select.c
54744 index fd38ce2..f5381b8 100644
54745 --- a/fs/select.c
54746 +++ b/fs/select.c
54747 @@ -20,6 +20,7 @@
54748 #include <linux/module.h>
54749 #include <linux/slab.h>
54750 #include <linux/poll.h>
54751 +#include <linux/security.h>
54752 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54753 #include <linux/file.h>
54754 #include <linux/fdtable.h>
54755 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54756 int retval, i, timed_out = 0;
54757 unsigned long slack = 0;
54758
54759 + pax_track_stack();
54760 +
54761 rcu_read_lock();
54762 retval = max_select_fd(n, fds);
54763 rcu_read_unlock();
54764 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54765 /* Allocate small arguments on the stack to save memory and be faster */
54766 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54767
54768 + pax_track_stack();
54769 +
54770 ret = -EINVAL;
54771 if (n < 0)
54772 goto out_nofds;
54773 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54774 struct poll_list *walk = head;
54775 unsigned long todo = nfds;
54776
54777 + pax_track_stack();
54778 +
54779 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54780 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54781 return -EINVAL;
54782
54783 diff --git a/fs/seq_file.c b/fs/seq_file.c
54784 index eae7d9d..4ddabe2 100644
54785 --- a/fs/seq_file.c
54786 +++ b/fs/seq_file.c
54787 @@ -9,6 +9,7 @@
54788 #include <linux/module.h>
54789 #include <linux/seq_file.h>
54790 #include <linux/slab.h>
54791 +#include <linux/sched.h>
54792
54793 #include <asm/uaccess.h>
54794 #include <asm/page.h>
54795 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54796 memset(p, 0, sizeof(*p));
54797 mutex_init(&p->lock);
54798 p->op = op;
54799 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54800 + p->exec_id = current->exec_id;
54801 +#endif
54802
54803 /*
54804 * Wrappers around seq_open(e.g. swaps_open) need to be
54805 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54806 return 0;
54807 }
54808 if (!m->buf) {
54809 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54810 + m->size = PAGE_SIZE;
54811 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54812 if (!m->buf)
54813 return -ENOMEM;
54814 }
54815 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54816 Eoverflow:
54817 m->op->stop(m, p);
54818 kfree(m->buf);
54819 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54820 + m->size <<= 1;
54821 + m->buf = kmalloc(m->size, GFP_KERNEL);
54822 return !m->buf ? -ENOMEM : -EAGAIN;
54823 }
54824
54825 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54826 m->version = file->f_version;
54827 /* grab buffer if we didn't have one */
54828 if (!m->buf) {
54829 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54830 + m->size = PAGE_SIZE;
54831 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54832 if (!m->buf)
54833 goto Enomem;
54834 }
54835 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54836 goto Fill;
54837 m->op->stop(m, p);
54838 kfree(m->buf);
54839 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54840 + m->size <<= 1;
54841 + m->buf = kmalloc(m->size, GFP_KERNEL);
54842 if (!m->buf)
54843 goto Enomem;
54844 m->count = 0;
54845 @@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
54846 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54847 void *data)
54848 {
54849 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54850 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54851 int res = -ENOMEM;
54852
54853 if (op) {
54854 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54855 index 71c29b6..54694dd 100644
54856 --- a/fs/smbfs/proc.c
54857 +++ b/fs/smbfs/proc.c
54858 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54859
54860 out:
54861 if (server->local_nls != NULL && server->remote_nls != NULL)
54862 - server->ops->convert = convert_cp;
54863 + *(void **)&server->ops->convert = convert_cp;
54864 else
54865 - server->ops->convert = convert_memcpy;
54866 + *(void **)&server->ops->convert = convert_memcpy;
54867
54868 smb_unlock_server(server);
54869 return n;
54870 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54871
54872 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54873 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54874 - server->ops->getattr = smb_proc_getattr_core;
54875 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
54876 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54877 - server->ops->getattr = smb_proc_getattr_ff;
54878 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54879 }
54880
54881 /* Decode server capabilities */
54882 @@ -3439,7 +3439,7 @@ out:
54883 static void
54884 install_ops(struct smb_ops *dst, struct smb_ops *src)
54885 {
54886 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54887 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54888 }
54889
54890 /* < LANMAN2 */
54891 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54892 index 00b2909..2ace383 100644
54893 --- a/fs/smbfs/symlink.c
54894 +++ b/fs/smbfs/symlink.c
54895 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54896
54897 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54898 {
54899 - char *s = nd_get_link(nd);
54900 + const char *s = nd_get_link(nd);
54901 if (!IS_ERR(s))
54902 __putname(s);
54903 }
54904 diff --git a/fs/splice.c b/fs/splice.c
54905 index bb92b7c..5aa72b0 100644
54906 --- a/fs/splice.c
54907 +++ b/fs/splice.c
54908 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54909 pipe_lock(pipe);
54910
54911 for (;;) {
54912 - if (!pipe->readers) {
54913 + if (!atomic_read(&pipe->readers)) {
54914 send_sig(SIGPIPE, current, 0);
54915 if (!ret)
54916 ret = -EPIPE;
54917 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54918 do_wakeup = 0;
54919 }
54920
54921 - pipe->waiting_writers++;
54922 + atomic_inc(&pipe->waiting_writers);
54923 pipe_wait(pipe);
54924 - pipe->waiting_writers--;
54925 + atomic_dec(&pipe->waiting_writers);
54926 }
54927
54928 pipe_unlock(pipe);
54929 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54930 .spd_release = spd_release_page,
54931 };
54932
54933 + pax_track_stack();
54934 +
54935 index = *ppos >> PAGE_CACHE_SHIFT;
54936 loff = *ppos & ~PAGE_CACHE_MASK;
54937 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54938 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54939 old_fs = get_fs();
54940 set_fs(get_ds());
54941 /* The cast to a user pointer is valid due to the set_fs() */
54942 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54943 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54944 set_fs(old_fs);
54945
54946 return res;
54947 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54948 old_fs = get_fs();
54949 set_fs(get_ds());
54950 /* The cast to a user pointer is valid due to the set_fs() */
54951 - res = vfs_write(file, (const char __user *)buf, count, &pos);
54952 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54953 set_fs(old_fs);
54954
54955 return res;
54956 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54957 .spd_release = spd_release_page,
54958 };
54959
54960 + pax_track_stack();
54961 +
54962 index = *ppos >> PAGE_CACHE_SHIFT;
54963 offset = *ppos & ~PAGE_CACHE_MASK;
54964 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54965 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54966 goto err;
54967
54968 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54969 - vec[i].iov_base = (void __user *) page_address(page);
54970 + vec[i].iov_base = (__force void __user *) page_address(page);
54971 vec[i].iov_len = this_len;
54972 pages[i] = page;
54973 spd.nr_pages++;
54974 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54975 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54976 {
54977 while (!pipe->nrbufs) {
54978 - if (!pipe->writers)
54979 + if (!atomic_read(&pipe->writers))
54980 return 0;
54981
54982 - if (!pipe->waiting_writers && sd->num_spliced)
54983 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54984 return 0;
54985
54986 if (sd->flags & SPLICE_F_NONBLOCK)
54987 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54988 * out of the pipe right after the splice_to_pipe(). So set
54989 * PIPE_READERS appropriately.
54990 */
54991 - pipe->readers = 1;
54992 + atomic_set(&pipe->readers, 1);
54993
54994 current->splice_pipe = pipe;
54995 }
54996 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54997 .spd_release = spd_release_page,
54998 };
54999
55000 + pax_track_stack();
55001 +
55002 pipe = pipe_info(file->f_path.dentry->d_inode);
55003 if (!pipe)
55004 return -EBADF;
55005 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55006 ret = -ERESTARTSYS;
55007 break;
55008 }
55009 - if (!pipe->writers)
55010 + if (!atomic_read(&pipe->writers))
55011 break;
55012 - if (!pipe->waiting_writers) {
55013 + if (!atomic_read(&pipe->waiting_writers)) {
55014 if (flags & SPLICE_F_NONBLOCK) {
55015 ret = -EAGAIN;
55016 break;
55017 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55018 pipe_lock(pipe);
55019
55020 while (pipe->nrbufs >= PIPE_BUFFERS) {
55021 - if (!pipe->readers) {
55022 + if (!atomic_read(&pipe->readers)) {
55023 send_sig(SIGPIPE, current, 0);
55024 ret = -EPIPE;
55025 break;
55026 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55027 ret = -ERESTARTSYS;
55028 break;
55029 }
55030 - pipe->waiting_writers++;
55031 + atomic_inc(&pipe->waiting_writers);
55032 pipe_wait(pipe);
55033 - pipe->waiting_writers--;
55034 + atomic_dec(&pipe->waiting_writers);
55035 }
55036
55037 pipe_unlock(pipe);
55038 @@ -1786,14 +1792,14 @@ retry:
55039 pipe_double_lock(ipipe, opipe);
55040
55041 do {
55042 - if (!opipe->readers) {
55043 + if (!atomic_read(&opipe->readers)) {
55044 send_sig(SIGPIPE, current, 0);
55045 if (!ret)
55046 ret = -EPIPE;
55047 break;
55048 }
55049
55050 - if (!ipipe->nrbufs && !ipipe->writers)
55051 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55052 break;
55053
55054 /*
55055 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55056 pipe_double_lock(ipipe, opipe);
55057
55058 do {
55059 - if (!opipe->readers) {
55060 + if (!atomic_read(&opipe->readers)) {
55061 send_sig(SIGPIPE, current, 0);
55062 if (!ret)
55063 ret = -EPIPE;
55064 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55065 * return EAGAIN if we have the potential of some data in the
55066 * future, otherwise just return 0
55067 */
55068 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55069 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55070 ret = -EAGAIN;
55071
55072 pipe_unlock(ipipe);
55073 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55074 index e020183..18d64b4 100644
55075 --- a/fs/sysfs/dir.c
55076 +++ b/fs/sysfs/dir.c
55077 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55078 struct sysfs_dirent *sd;
55079 int rc;
55080
55081 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55082 + const char *parent_name = parent_sd->s_name;
55083 +
55084 + mode = S_IFDIR | S_IRWXU;
55085 +
55086 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55087 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55088 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55089 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55090 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55091 +#endif
55092 +
55093 /* allocate */
55094 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55095 if (!sd)
55096 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55097 index 7118a38..70af853 100644
55098 --- a/fs/sysfs/file.c
55099 +++ b/fs/sysfs/file.c
55100 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55101
55102 struct sysfs_open_dirent {
55103 atomic_t refcnt;
55104 - atomic_t event;
55105 + atomic_unchecked_t event;
55106 wait_queue_head_t poll;
55107 struct list_head buffers; /* goes through sysfs_buffer.list */
55108 };
55109 @@ -53,7 +53,7 @@ struct sysfs_buffer {
55110 size_t count;
55111 loff_t pos;
55112 char * page;
55113 - struct sysfs_ops * ops;
55114 + const struct sysfs_ops * ops;
55115 struct mutex mutex;
55116 int needs_read_fill;
55117 int event;
55118 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55119 {
55120 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55121 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55122 - struct sysfs_ops * ops = buffer->ops;
55123 + const struct sysfs_ops * ops = buffer->ops;
55124 int ret = 0;
55125 ssize_t count;
55126
55127 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55128 if (!sysfs_get_active_two(attr_sd))
55129 return -ENODEV;
55130
55131 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55132 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55133 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55134
55135 sysfs_put_active_two(attr_sd);
55136 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
55137 {
55138 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55139 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55140 - struct sysfs_ops * ops = buffer->ops;
55141 + const struct sysfs_ops * ops = buffer->ops;
55142 int rc;
55143
55144 /* need attr_sd for attr and ops, its parent for kobj */
55145 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55146 return -ENOMEM;
55147
55148 atomic_set(&new_od->refcnt, 0);
55149 - atomic_set(&new_od->event, 1);
55150 + atomic_set_unchecked(&new_od->event, 1);
55151 init_waitqueue_head(&new_od->poll);
55152 INIT_LIST_HEAD(&new_od->buffers);
55153 goto retry;
55154 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
55155 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
55156 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55157 struct sysfs_buffer *buffer;
55158 - struct sysfs_ops *ops;
55159 + const struct sysfs_ops *ops;
55160 int error = -EACCES;
55161 char *p;
55162
55163 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55164
55165 sysfs_put_active_two(attr_sd);
55166
55167 - if (buffer->event != atomic_read(&od->event))
55168 + if (buffer->event != atomic_read_unchecked(&od->event))
55169 goto trigger;
55170
55171 return DEFAULT_POLLMASK;
55172 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55173
55174 od = sd->s_attr.open;
55175 if (od) {
55176 - atomic_inc(&od->event);
55177 + atomic_inc_unchecked(&od->event);
55178 wake_up_interruptible(&od->poll);
55179 }
55180
55181 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55182 index c5081ad..342ea86 100644
55183 --- a/fs/sysfs/symlink.c
55184 +++ b/fs/sysfs/symlink.c
55185 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55186
55187 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55188 {
55189 - char *page = nd_get_link(nd);
55190 + const char *page = nd_get_link(nd);
55191 if (!IS_ERR(page))
55192 free_page((unsigned long)page);
55193 }
55194 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
55195 index 1e06853..b06d325 100644
55196 --- a/fs/udf/balloc.c
55197 +++ b/fs/udf/balloc.c
55198 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
55199
55200 mutex_lock(&sbi->s_alloc_mutex);
55201 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55202 - if (bloc->logicalBlockNum < 0 ||
55203 - (bloc->logicalBlockNum + count) >
55204 - partmap->s_partition_len) {
55205 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55206 udf_debug("%d < %d || %d + %d > %d\n",
55207 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
55208 count, partmap->s_partition_len);
55209 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
55210
55211 mutex_lock(&sbi->s_alloc_mutex);
55212 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55213 - if (bloc->logicalBlockNum < 0 ||
55214 - (bloc->logicalBlockNum + count) >
55215 - partmap->s_partition_len) {
55216 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55217 udf_debug("%d < %d || %d + %d > %d\n",
55218 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
55219 partmap->s_partition_len);
55220 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
55221 index 6d24c2c..fff470f 100644
55222 --- a/fs/udf/inode.c
55223 +++ b/fs/udf/inode.c
55224 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55225 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55226 int lastblock = 0;
55227
55228 + pax_track_stack();
55229 +
55230 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55231 prev_epos.block = iinfo->i_location;
55232 prev_epos.bh = NULL;
55233 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55234 index 9215700..bf1f68e 100644
55235 --- a/fs/udf/misc.c
55236 +++ b/fs/udf/misc.c
55237 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55238
55239 u8 udf_tag_checksum(const struct tag *t)
55240 {
55241 - u8 *data = (u8 *)t;
55242 + const u8 *data = (const u8 *)t;
55243 u8 checksum = 0;
55244 int i;
55245 for (i = 0; i < sizeof(struct tag); ++i)
55246 diff --git a/fs/utimes.c b/fs/utimes.c
55247 index e4c75db..b4df0e0 100644
55248 --- a/fs/utimes.c
55249 +++ b/fs/utimes.c
55250 @@ -1,6 +1,7 @@
55251 #include <linux/compiler.h>
55252 #include <linux/file.h>
55253 #include <linux/fs.h>
55254 +#include <linux/security.h>
55255 #include <linux/linkage.h>
55256 #include <linux/mount.h>
55257 #include <linux/namei.h>
55258 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55259 goto mnt_drop_write_and_out;
55260 }
55261 }
55262 +
55263 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55264 + error = -EACCES;
55265 + goto mnt_drop_write_and_out;
55266 + }
55267 +
55268 mutex_lock(&inode->i_mutex);
55269 error = notify_change(path->dentry, &newattrs);
55270 mutex_unlock(&inode->i_mutex);
55271 diff --git a/fs/xattr.c b/fs/xattr.c
55272 index 6d4f6d3..cda3958 100644
55273 --- a/fs/xattr.c
55274 +++ b/fs/xattr.c
55275 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55276 * Extended attribute SET operations
55277 */
55278 static long
55279 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
55280 +setxattr(struct path *path, const char __user *name, const void __user *value,
55281 size_t size, int flags)
55282 {
55283 int error;
55284 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55285 return PTR_ERR(kvalue);
55286 }
55287
55288 - error = vfs_setxattr(d, kname, kvalue, size, flags);
55289 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55290 + error = -EACCES;
55291 + goto out;
55292 + }
55293 +
55294 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55295 +out:
55296 kfree(kvalue);
55297 return error;
55298 }
55299 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55300 return error;
55301 error = mnt_want_write(path.mnt);
55302 if (!error) {
55303 - error = setxattr(path.dentry, name, value, size, flags);
55304 + error = setxattr(&path, name, value, size, flags);
55305 mnt_drop_write(path.mnt);
55306 }
55307 path_put(&path);
55308 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55309 return error;
55310 error = mnt_want_write(path.mnt);
55311 if (!error) {
55312 - error = setxattr(path.dentry, name, value, size, flags);
55313 + error = setxattr(&path, name, value, size, flags);
55314 mnt_drop_write(path.mnt);
55315 }
55316 path_put(&path);
55317 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55318 const void __user *,value, size_t, size, int, flags)
55319 {
55320 struct file *f;
55321 - struct dentry *dentry;
55322 int error = -EBADF;
55323
55324 f = fget(fd);
55325 if (!f)
55326 return error;
55327 - dentry = f->f_path.dentry;
55328 - audit_inode(NULL, dentry);
55329 + audit_inode(NULL, f->f_path.dentry);
55330 error = mnt_want_write_file(f);
55331 if (!error) {
55332 - error = setxattr(dentry, name, value, size, flags);
55333 + error = setxattr(&f->f_path, name, value, size, flags);
55334 mnt_drop_write(f->f_path.mnt);
55335 }
55336 fput(f);
55337 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55338 index c6ad7c7..f2847a7 100644
55339 --- a/fs/xattr_acl.c
55340 +++ b/fs/xattr_acl.c
55341 @@ -17,8 +17,8 @@
55342 struct posix_acl *
55343 posix_acl_from_xattr(const void *value, size_t size)
55344 {
55345 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55346 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55347 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55348 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55349 int count;
55350 struct posix_acl *acl;
55351 struct posix_acl_entry *acl_e;
55352 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55353 index 942362f..88f96f5 100644
55354 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
55355 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55356 @@ -134,7 +134,7 @@ xfs_find_handle(
55357 }
55358
55359 error = -EFAULT;
55360 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55361 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55362 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55363 goto out_put;
55364
55365 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55366 if (IS_ERR(dentry))
55367 return PTR_ERR(dentry);
55368
55369 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55370 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55371 if (!kbuf)
55372 goto out_dput;
55373
55374 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55375 xfs_mount_t *mp,
55376 void __user *arg)
55377 {
55378 - xfs_fsop_geom_t fsgeo;
55379 + xfs_fsop_geom_t fsgeo;
55380 int error;
55381
55382 error = xfs_fs_geometry(mp, &fsgeo, 3);
55383 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55384 index bad485a..479bd32 100644
55385 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55386 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55387 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55388 xfs_fsop_geom_t fsgeo;
55389 int error;
55390
55391 + memset(&fsgeo, 0, sizeof(fsgeo));
55392 error = xfs_fs_geometry(mp, &fsgeo, 3);
55393 if (error)
55394 return -error;
55395 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55396 index 1f3b4b8..6102f6d 100644
55397 --- a/fs/xfs/linux-2.6/xfs_iops.c
55398 +++ b/fs/xfs/linux-2.6/xfs_iops.c
55399 @@ -468,7 +468,7 @@ xfs_vn_put_link(
55400 struct nameidata *nd,
55401 void *p)
55402 {
55403 - char *s = nd_get_link(nd);
55404 + const char *s = nd_get_link(nd);
55405
55406 if (!IS_ERR(s))
55407 kfree(s);
55408 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55409 index 8971fb0..5fc1eb2 100644
55410 --- a/fs/xfs/xfs_bmap.c
55411 +++ b/fs/xfs/xfs_bmap.c
55412 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55413 int nmap,
55414 int ret_nmap);
55415 #else
55416 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55417 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55418 #endif /* DEBUG */
55419
55420 #if defined(XFS_RW_TRACE)
55421 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55422 index e89734e..5e84d8d 100644
55423 --- a/fs/xfs/xfs_dir2_sf.c
55424 +++ b/fs/xfs/xfs_dir2_sf.c
55425 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55426 }
55427
55428 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55429 - if (filldir(dirent, sfep->name, sfep->namelen,
55430 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55431 + char name[sfep->namelen];
55432 + memcpy(name, sfep->name, sfep->namelen);
55433 + if (filldir(dirent, name, sfep->namelen,
55434 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
55435 + *offset = off & 0x7fffffff;
55436 + return 0;
55437 + }
55438 + } else if (filldir(dirent, sfep->name, sfep->namelen,
55439 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55440 *offset = off & 0x7fffffff;
55441 return 0;
55442 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55443 index 8f32f50..b6a41e8 100644
55444 --- a/fs/xfs/xfs_vnodeops.c
55445 +++ b/fs/xfs/xfs_vnodeops.c
55446 @@ -564,13 +564,18 @@ xfs_readlink(
55447
55448 xfs_ilock(ip, XFS_ILOCK_SHARED);
55449
55450 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55451 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55452 -
55453 pathlen = ip->i_d.di_size;
55454 if (!pathlen)
55455 goto out;
55456
55457 + if (pathlen > MAXPATHLEN) {
55458 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55459 + __func__, (unsigned long long)ip->i_ino, pathlen);
55460 + ASSERT(0);
55461 + error = XFS_ERROR(EFSCORRUPTED);
55462 + goto out;
55463 + }
55464 +
55465 if (ip->i_df.if_flags & XFS_IFINLINE) {
55466 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55467 link[pathlen] = '\0';
55468 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55469 new file mode 100644
55470 index 0000000..7026cbd
55471 --- /dev/null
55472 +++ b/grsecurity/Kconfig
55473 @@ -0,0 +1,1074 @@
55474 +#
55475 +# grecurity configuration
55476 +#
55477 +
55478 +menu "Grsecurity"
55479 +
55480 +config GRKERNSEC
55481 + bool "Grsecurity"
55482 + select CRYPTO
55483 + select CRYPTO_SHA256
55484 + help
55485 + If you say Y here, you will be able to configure many features
55486 + that will enhance the security of your system. It is highly
55487 + recommended that you say Y here and read through the help
55488 + for each option so that you fully understand the features and
55489 + can evaluate their usefulness for your machine.
55490 +
55491 +choice
55492 + prompt "Security Level"
55493 + depends on GRKERNSEC
55494 + default GRKERNSEC_CUSTOM
55495 +
55496 +config GRKERNSEC_LOW
55497 + bool "Low"
55498 + select GRKERNSEC_LINK
55499 + select GRKERNSEC_FIFO
55500 + select GRKERNSEC_RANDNET
55501 + select GRKERNSEC_DMESG
55502 + select GRKERNSEC_CHROOT
55503 + select GRKERNSEC_CHROOT_CHDIR
55504 +
55505 + help
55506 + If you choose this option, several of the grsecurity options will
55507 + be enabled that will give you greater protection against a number
55508 + of attacks, while assuring that none of your software will have any
55509 + conflicts with the additional security measures. If you run a lot
55510 + of unusual software, or you are having problems with the higher
55511 + security levels, you should say Y here. With this option, the
55512 + following features are enabled:
55513 +
55514 + - Linking restrictions
55515 + - FIFO restrictions
55516 + - Restricted dmesg
55517 + - Enforced chdir("/") on chroot
55518 + - Runtime module disabling
55519 +
55520 +config GRKERNSEC_MEDIUM
55521 + bool "Medium"
55522 + select PAX
55523 + select PAX_EI_PAX
55524 + select PAX_PT_PAX_FLAGS
55525 + select PAX_HAVE_ACL_FLAGS
55526 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55527 + select GRKERNSEC_CHROOT
55528 + select GRKERNSEC_CHROOT_SYSCTL
55529 + select GRKERNSEC_LINK
55530 + select GRKERNSEC_FIFO
55531 + select GRKERNSEC_DMESG
55532 + select GRKERNSEC_RANDNET
55533 + select GRKERNSEC_FORKFAIL
55534 + select GRKERNSEC_TIME
55535 + select GRKERNSEC_SIGNAL
55536 + select GRKERNSEC_CHROOT
55537 + select GRKERNSEC_CHROOT_UNIX
55538 + select GRKERNSEC_CHROOT_MOUNT
55539 + select GRKERNSEC_CHROOT_PIVOT
55540 + select GRKERNSEC_CHROOT_DOUBLE
55541 + select GRKERNSEC_CHROOT_CHDIR
55542 + select GRKERNSEC_CHROOT_MKNOD
55543 + select GRKERNSEC_PROC
55544 + select GRKERNSEC_PROC_USERGROUP
55545 + select PAX_RANDUSTACK
55546 + select PAX_ASLR
55547 + select PAX_RANDMMAP
55548 + select PAX_REFCOUNT if (X86 || SPARC64)
55549 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55550 +
55551 + help
55552 + If you say Y here, several features in addition to those included
55553 + in the low additional security level will be enabled. These
55554 + features provide even more security to your system, though in rare
55555 + cases they may be incompatible with very old or poorly written
55556 + software. If you enable this option, make sure that your auth
55557 + service (identd) is running as gid 1001. With this option,
55558 + the following features (in addition to those provided in the
55559 + low additional security level) will be enabled:
55560 +
55561 + - Failed fork logging
55562 + - Time change logging
55563 + - Signal logging
55564 + - Deny mounts in chroot
55565 + - Deny double chrooting
55566 + - Deny sysctl writes in chroot
55567 + - Deny mknod in chroot
55568 + - Deny access to abstract AF_UNIX sockets out of chroot
55569 + - Deny pivot_root in chroot
55570 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55571 + - /proc restrictions with special GID set to 10 (usually wheel)
55572 + - Address Space Layout Randomization (ASLR)
55573 + - Prevent exploitation of most refcount overflows
55574 + - Bounds checking of copying between the kernel and userland
55575 +
55576 +config GRKERNSEC_HIGH
55577 + bool "High"
55578 + select GRKERNSEC_LINK
55579 + select GRKERNSEC_FIFO
55580 + select GRKERNSEC_DMESG
55581 + select GRKERNSEC_FORKFAIL
55582 + select GRKERNSEC_TIME
55583 + select GRKERNSEC_SIGNAL
55584 + select GRKERNSEC_CHROOT
55585 + select GRKERNSEC_CHROOT_SHMAT
55586 + select GRKERNSEC_CHROOT_UNIX
55587 + select GRKERNSEC_CHROOT_MOUNT
55588 + select GRKERNSEC_CHROOT_FCHDIR
55589 + select GRKERNSEC_CHROOT_PIVOT
55590 + select GRKERNSEC_CHROOT_DOUBLE
55591 + select GRKERNSEC_CHROOT_CHDIR
55592 + select GRKERNSEC_CHROOT_MKNOD
55593 + select GRKERNSEC_CHROOT_CAPS
55594 + select GRKERNSEC_CHROOT_SYSCTL
55595 + select GRKERNSEC_CHROOT_FINDTASK
55596 + select GRKERNSEC_SYSFS_RESTRICT
55597 + select GRKERNSEC_PROC
55598 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55599 + select GRKERNSEC_HIDESYM
55600 + select GRKERNSEC_BRUTE
55601 + select GRKERNSEC_PROC_USERGROUP
55602 + select GRKERNSEC_KMEM
55603 + select GRKERNSEC_RESLOG
55604 + select GRKERNSEC_RANDNET
55605 + select GRKERNSEC_PROC_ADD
55606 + select GRKERNSEC_CHROOT_CHMOD
55607 + select GRKERNSEC_CHROOT_NICE
55608 + select GRKERNSEC_SETXID
55609 + select GRKERNSEC_AUDIT_MOUNT
55610 + select GRKERNSEC_MODHARDEN if (MODULES)
55611 + select GRKERNSEC_HARDEN_PTRACE
55612 + select GRKERNSEC_PTRACE_READEXEC
55613 + select GRKERNSEC_VM86 if (X86_32)
55614 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55615 + select PAX
55616 + select PAX_RANDUSTACK
55617 + select PAX_ASLR
55618 + select PAX_RANDMMAP
55619 + select PAX_NOEXEC
55620 + select PAX_MPROTECT
55621 + select PAX_EI_PAX
55622 + select PAX_PT_PAX_FLAGS
55623 + select PAX_HAVE_ACL_FLAGS
55624 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55625 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55626 + select PAX_RANDKSTACK if (X86_TSC && X86)
55627 + select PAX_SEGMEXEC if (X86_32)
55628 + select PAX_PAGEEXEC
55629 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55630 + select PAX_EMUTRAMP if (PARISC)
55631 + select PAX_EMUSIGRT if (PARISC)
55632 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55633 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55634 + select PAX_REFCOUNT if (X86 || SPARC64)
55635 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55636 + help
55637 + If you say Y here, many of the features of grsecurity will be
55638 + enabled, which will protect you against many kinds of attacks
55639 + against your system. The heightened security comes at a cost
55640 + of an increased chance of incompatibilities with rare software
55641 + on your machine. Since this security level enables PaX, you should
55642 + view <http://pax.grsecurity.net> and read about the PaX
55643 + project. While you are there, download chpax and run it on
55644 + binaries that cause problems with PaX. Also remember that
55645 + since the /proc restrictions are enabled, you must run your
55646 + identd as gid 1001. This security level enables the following
55647 + features in addition to those listed in the low and medium
55648 + security levels:
55649 +
55650 + - Additional /proc restrictions
55651 + - Chmod restrictions in chroot
55652 + - No signals, ptrace, or viewing of processes outside of chroot
55653 + - Capability restrictions in chroot
55654 + - Deny fchdir out of chroot
55655 + - Priority restrictions in chroot
55656 + - Segmentation-based implementation of PaX
55657 + - Mprotect restrictions
55658 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55659 + - Kernel stack randomization
55660 + - Mount/unmount/remount logging
55661 + - Kernel symbol hiding
55662 + - Hardening of module auto-loading
55663 + - Ptrace restrictions
55664 + - Restricted vm86 mode
55665 + - Restricted sysfs/debugfs
55666 + - Active kernel exploit response
55667 +
55668 +config GRKERNSEC_CUSTOM
55669 + bool "Custom"
55670 + help
55671 + If you say Y here, you will be able to configure every grsecurity
55672 + option, which allows you to enable many more features that aren't
55673 + covered in the basic security levels. These additional features
55674 + include TPE, socket restrictions, and the sysctl system for
55675 + grsecurity. It is advised that you read through the help for
55676 + each option to determine its usefulness in your situation.
55677 +
55678 +endchoice
55679 +
55680 +menu "Memory Protections"
55681 +depends on GRKERNSEC
55682 +
55683 +config GRKERNSEC_KMEM
55684 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55685 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55686 + help
55687 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55688 + be written to or read from to modify or leak the contents of the running
55689 + kernel. /dev/port will also not be allowed to be opened. If you have module
55690 + support disabled, enabling this will close up four ways that are
55691 + currently used to insert malicious code into the running kernel.
55692 + Even with all these features enabled, we still highly recommend that
55693 + you use the RBAC system, as it is still possible for an attacker to
55694 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55695 + If you are not using XFree86, you may be able to stop this additional
55696 + case by enabling the 'Disable privileged I/O' option. Though nothing
55697 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55698 + but only to video memory, which is the only writing we allow in this
55699 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55700 + not be allowed to mprotect it with PROT_WRITE later.
55701 + It is highly recommended that you say Y here if you meet all the
55702 + conditions above.
55703 +
55704 +config GRKERNSEC_VM86
55705 + bool "Restrict VM86 mode"
55706 + depends on X86_32
55707 +
55708 + help
55709 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55710 + make use of a special execution mode on 32bit x86 processors called
55711 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55712 + video cards and will still work with this option enabled. The purpose
55713 + of the option is to prevent exploitation of emulation errors in
55714 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55715 + Nearly all users should be able to enable this option.
55716 +
55717 +config GRKERNSEC_IO
55718 + bool "Disable privileged I/O"
55719 + depends on X86
55720 + select RTC_CLASS
55721 + select RTC_INTF_DEV
55722 + select RTC_DRV_CMOS
55723 +
55724 + help
55725 + If you say Y here, all ioperm and iopl calls will return an error.
55726 + Ioperm and iopl can be used to modify the running kernel.
55727 + Unfortunately, some programs need this access to operate properly,
55728 + the most notable of which are XFree86 and hwclock. hwclock can be
55729 + remedied by having RTC support in the kernel, so real-time
55730 + clock support is enabled if this option is enabled, to ensure
55731 + that hwclock operates correctly. XFree86 still will not
55732 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55733 + IF YOU USE XFree86. If you use XFree86 and you still want to
55734 + protect your kernel against modification, use the RBAC system.
55735 +
55736 +config GRKERNSEC_PROC_MEMMAP
55737 + bool "Harden ASLR against information leaks and entropy reduction"
55738 + default y if (PAX_NOEXEC || PAX_ASLR)
55739 + depends on PAX_NOEXEC || PAX_ASLR
55740 + help
55741 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55742 + give no information about the addresses of its mappings if
55743 + PaX features that rely on random addresses are enabled on the task.
55744 + In addition to sanitizing this information and disabling other
55745 + dangerous sources of information, this option causes reads of sensitive
55746 + /proc/<pid> entries where the file descriptor was opened in a different
55747 + task than the one performing the read. Such attempts are logged.
55748 + Finally, this option limits argv/env strings for suid/sgid binaries
55749 + to 1MB to prevent a complete exhaustion of the stack entropy provided
55750 + by ASLR.
55751 + If you use PaX it is essential that you say Y here as it closes up
55752 + several holes that make full ASLR useless for suid/sgid binaries.
55753 +
55754 +config GRKERNSEC_BRUTE
55755 + bool "Deter exploit bruteforcing"
55756 + help
55757 + If you say Y here, attempts to bruteforce exploits against forking
55758 + daemons such as apache or sshd, as well as against suid/sgid binaries
55759 + will be deterred. When a child of a forking daemon is killed by PaX
55760 + or crashes due to an illegal instruction or other suspicious signal,
55761 + the parent process will be delayed 30 seconds upon every subsequent
55762 + fork until the administrator is able to assess the situation and
55763 + restart the daemon.
55764 + In the suid/sgid case, the attempt is logged, the user has all their
55765 + processes terminated, and they are prevented from executing any further
55766 + processes for 15 minutes.
55767 + It is recommended that you also enable signal logging in the auditing
55768 + section so that logs are generated when a process triggers a suspicious
55769 + signal.
55770 + If the sysctl option is enabled, a sysctl option with name
55771 + "deter_bruteforce" is created.
55772 +
55773 +config GRKERNSEC_MODHARDEN
55774 + bool "Harden module auto-loading"
55775 + depends on MODULES
55776 + help
55777 + If you say Y here, module auto-loading in response to use of some
55778 + feature implemented by an unloaded module will be restricted to
55779 + root users. Enabling this option helps defend against attacks
55780 + by unprivileged users who abuse the auto-loading behavior to
55781 + cause a vulnerable module to load that is then exploited.
55782 +
55783 + If this option prevents a legitimate use of auto-loading for a
55784 + non-root user, the administrator can execute modprobe manually
55785 + with the exact name of the module mentioned in the alert log.
55786 + Alternatively, the administrator can add the module to the list
55787 + of modules loaded at boot by modifying init scripts.
55788 +
55789 + Modification of init scripts will most likely be needed on
55790 + Ubuntu servers with encrypted home directory support enabled,
55791 + as the first non-root user logging in will cause the ecb(aes),
55792 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55793 +
55794 +config GRKERNSEC_HIDESYM
55795 + bool "Hide kernel symbols"
55796 + help
55797 + If you say Y here, getting information on loaded modules, and
55798 + displaying all kernel symbols through a syscall will be restricted
55799 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55800 + /proc/kallsyms will be restricted to the root user. The RBAC
55801 + system can hide that entry even from root.
55802 +
55803 + This option also prevents leaking of kernel addresses through
55804 + several /proc entries.
55805 +
55806 + Note that this option is only effective provided the following
55807 + conditions are met:
55808 + 1) The kernel using grsecurity is not precompiled by some distribution
55809 + 2) You have also enabled GRKERNSEC_DMESG
55810 + 3) You are using the RBAC system and hiding other files such as your
55811 + kernel image and System.map. Alternatively, enabling this option
55812 + causes the permissions on /boot, /lib/modules, and the kernel
55813 + source directory to change at compile time to prevent
55814 + reading by non-root users.
55815 + If the above conditions are met, this option will aid in providing a
55816 + useful protection against local kernel exploitation of overflows
55817 + and arbitrary read/write vulnerabilities.
55818 +
55819 +config GRKERNSEC_KERN_LOCKOUT
55820 + bool "Active kernel exploit response"
55821 + depends on X86 || ARM || PPC || SPARC
55822 + help
55823 + If you say Y here, when a PaX alert is triggered due to suspicious
55824 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55825 + or an OOPs occurs due to bad memory accesses, instead of just
55826 + terminating the offending process (and potentially allowing
55827 + a subsequent exploit from the same user), we will take one of two
55828 + actions:
55829 + If the user was root, we will panic the system
55830 + If the user was non-root, we will log the attempt, terminate
55831 + all processes owned by the user, then prevent them from creating
55832 + any new processes until the system is restarted
55833 + This deters repeated kernel exploitation/bruteforcing attempts
55834 + and is useful for later forensics.
55835 +
55836 +endmenu
55837 +menu "Role Based Access Control Options"
55838 +depends on GRKERNSEC
55839 +
55840 +config GRKERNSEC_RBAC_DEBUG
55841 + bool
55842 +
55843 +config GRKERNSEC_NO_RBAC
55844 + bool "Disable RBAC system"
55845 + help
55846 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55847 + preventing the RBAC system from being enabled. You should only say Y
55848 + here if you have no intention of using the RBAC system, so as to prevent
55849 + an attacker with root access from misusing the RBAC system to hide files
55850 + and processes when loadable module support and /dev/[k]mem have been
55851 + locked down.
55852 +
55853 +config GRKERNSEC_ACL_HIDEKERN
55854 + bool "Hide kernel processes"
55855 + help
55856 + If you say Y here, all kernel threads will be hidden to all
55857 + processes but those whose subject has the "view hidden processes"
55858 + flag.
55859 +
55860 +config GRKERNSEC_ACL_MAXTRIES
55861 + int "Maximum tries before password lockout"
55862 + default 3
55863 + help
55864 + This option enforces the maximum number of times a user can attempt
55865 + to authorize themselves with the grsecurity RBAC system before being
55866 + denied the ability to attempt authorization again for a specified time.
55867 + The lower the number, the harder it will be to brute-force a password.
55868 +
55869 +config GRKERNSEC_ACL_TIMEOUT
55870 + int "Time to wait after max password tries, in seconds"
55871 + default 30
55872 + help
55873 + This option specifies the time the user must wait after attempting to
55874 + authorize to the RBAC system with the maximum number of invalid
55875 + passwords. The higher the number, the harder it will be to brute-force
55876 + a password.
55877 +
55878 +endmenu
55879 +menu "Filesystem Protections"
55880 +depends on GRKERNSEC
55881 +
55882 +config GRKERNSEC_PROC
55883 + bool "Proc restrictions"
55884 + help
55885 + If you say Y here, the permissions of the /proc filesystem
55886 + will be altered to enhance system security and privacy. You MUST
55887 + choose either a user only restriction or a user and group restriction.
55888 + Depending upon the option you choose, you can either restrict users to
55889 + see only the processes they themselves run, or choose a group that can
55890 + view all processes and files normally restricted to root if you choose
55891 + the "restrict to user only" option. NOTE: If you're running identd as
55892 + a non-root user, you will have to run it as the group you specify here.
55893 +
55894 +config GRKERNSEC_PROC_USER
55895 + bool "Restrict /proc to user only"
55896 + depends on GRKERNSEC_PROC
55897 + help
55898 + If you say Y here, non-root users will only be able to view their own
55899 + processes, and restricts them from viewing network-related information,
55900 + and viewing kernel symbol and module information.
55901 +
55902 +config GRKERNSEC_PROC_USERGROUP
55903 + bool "Allow special group"
55904 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55905 + help
55906 + If you say Y here, you will be able to select a group that will be
55907 + able to view all processes and network-related information. If you've
55908 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55909 + remain hidden. This option is useful if you want to run identd as
55910 + a non-root user.
55911 +
55912 +config GRKERNSEC_PROC_GID
55913 + int "GID for special group"
55914 + depends on GRKERNSEC_PROC_USERGROUP
55915 + default 1001
55916 +
55917 +config GRKERNSEC_PROC_ADD
55918 + bool "Additional restrictions"
55919 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55920 + help
55921 + If you say Y here, additional restrictions will be placed on
55922 + /proc that keep normal users from viewing device information and
55923 + slabinfo information that could be useful for exploits.
55924 +
55925 +config GRKERNSEC_LINK
55926 + bool "Linking restrictions"
55927 + help
55928 + If you say Y here, /tmp race exploits will be prevented, since users
55929 + will no longer be able to follow symlinks owned by other users in
55930 + world-writable +t directories (e.g. /tmp), unless the owner of the
55931 + symlink is the owner of the directory. Users will also not be
55932 + able to hardlink to files they do not own. If the sysctl option is
55933 + enabled, a sysctl option with name "linking_restrictions" is created.
55934 +
55935 +config GRKERNSEC_FIFO
55936 + bool "FIFO restrictions"
55937 + help
55938 + If you say Y here, users will not be able to write to FIFOs they don't
55939 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55940 + the FIFO is the same owner of the directory it's held in. If the sysctl
55941 + option is enabled, a sysctl option with name "fifo_restrictions" is
55942 + created.
55943 +
55944 +config GRKERNSEC_SYSFS_RESTRICT
55945 + bool "Sysfs/debugfs restriction"
55946 + depends on SYSFS
55947 + help
55948 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55949 + any filesystem normally mounted under it (e.g. debugfs) will be
55950 + mostly accessible only by root. These filesystems generally provide access
55951 + to hardware and debug information that isn't appropriate for unprivileged
55952 + users of the system. Sysfs and debugfs have also become a large source
55953 + of new vulnerabilities, ranging from infoleaks to local compromise.
55954 + There has been very little oversight with an eye toward security involved
55955 + in adding new exporters of information to these filesystems, so their
55956 + use is discouraged.
55957 + For reasons of compatibility, a few directories have been whitelisted
55958 + for access by non-root users:
55959 + /sys/fs/selinux
55960 + /sys/fs/fuse
55961 + /sys/devices/system/cpu
55962 +
55963 +config GRKERNSEC_ROFS
55964 + bool "Runtime read-only mount protection"
55965 + help
55966 + If you say Y here, a sysctl option with name "romount_protect" will
55967 + be created. By setting this option to 1 at runtime, filesystems
55968 + will be protected in the following ways:
55969 + * No new writable mounts will be allowed
55970 + * Existing read-only mounts won't be able to be remounted read/write
55971 + * Write operations will be denied on all block devices
55972 + This option acts independently of grsec_lock: once it is set to 1,
55973 + it cannot be turned off. Therefore, please be mindful of the resulting
55974 + behavior if this option is enabled in an init script on a read-only
55975 + filesystem. This feature is mainly intended for secure embedded systems.
55976 +
55977 +config GRKERNSEC_CHROOT
55978 + bool "Chroot jail restrictions"
55979 + help
55980 + If you say Y here, you will be able to choose several options that will
55981 + make breaking out of a chrooted jail much more difficult. If you
55982 + encounter no software incompatibilities with the following options, it
55983 + is recommended that you enable each one.
55984 +
55985 +config GRKERNSEC_CHROOT_MOUNT
55986 + bool "Deny mounts"
55987 + depends on GRKERNSEC_CHROOT
55988 + help
55989 + If you say Y here, processes inside a chroot will not be able to
55990 + mount or remount filesystems. If the sysctl option is enabled, a
55991 + sysctl option with name "chroot_deny_mount" is created.
55992 +
55993 +config GRKERNSEC_CHROOT_DOUBLE
55994 + bool "Deny double-chroots"
55995 + depends on GRKERNSEC_CHROOT
55996 + help
55997 + If you say Y here, processes inside a chroot will not be able to chroot
55998 + again outside the chroot. This is a widely used method of breaking
55999 + out of a chroot jail and should not be allowed. If the sysctl
56000 + option is enabled, a sysctl option with name
56001 + "chroot_deny_chroot" is created.
56002 +
56003 +config GRKERNSEC_CHROOT_PIVOT
56004 + bool "Deny pivot_root in chroot"
56005 + depends on GRKERNSEC_CHROOT
56006 + help
56007 + If you say Y here, processes inside a chroot will not be able to use
56008 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56009 + works similarly to chroot in that it changes the root filesystem. This
56010 + function could be misused in a chrooted process to attempt to break out
56011 + of the chroot, and therefore should not be allowed. If the sysctl
56012 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56013 + created.
56014 +
56015 +config GRKERNSEC_CHROOT_CHDIR
56016 + bool "Enforce chdir(\"/\") on all chroots"
56017 + depends on GRKERNSEC_CHROOT
56018 + help
56019 + If you say Y here, the current working directory of all newly-chrooted
56020 + applications will be set to the root directory of the chroot.
56021 + The man page on chroot(2) states:
56022 + Note that this call does not change the current working
56023 + directory, so that `.' can be outside the tree rooted at
56024 + `/'. In particular, the super-user can escape from a
56025 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56026 +
56027 + It is recommended that you say Y here, since it's not known to break
56028 + any software. If the sysctl option is enabled, a sysctl option with
56029 + name "chroot_enforce_chdir" is created.
56030 +
56031 +config GRKERNSEC_CHROOT_CHMOD
56032 + bool "Deny (f)chmod +s"
56033 + depends on GRKERNSEC_CHROOT
56034 + help
56035 + If you say Y here, processes inside a chroot will not be able to chmod
56036 + or fchmod files to make them have suid or sgid bits. This protects
56037 + against another published method of breaking a chroot. If the sysctl
56038 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56039 + created.
56040 +
56041 +config GRKERNSEC_CHROOT_FCHDIR
56042 + bool "Deny fchdir out of chroot"
56043 + depends on GRKERNSEC_CHROOT
56044 + help
56045 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56046 + to a file descriptor of the chrooting process that points to a directory
56047 + outside the filesystem will be stopped. If the sysctl option
56048 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56049 +
56050 +config GRKERNSEC_CHROOT_MKNOD
56051 + bool "Deny mknod"
56052 + depends on GRKERNSEC_CHROOT
56053 + help
56054 + If you say Y here, processes inside a chroot will not be allowed to
56055 + mknod. The problem with using mknod inside a chroot is that it
56056 + would allow an attacker to create a device entry that is the same
56057 + as one on the physical root of your system, which could be
56058 + anything from the console device to a device for your harddrive (which
56059 + they could then use to wipe the drive or steal data). It is recommended
56060 + that you say Y here, unless you run into software incompatibilities.
56061 + If the sysctl option is enabled, a sysctl option with name
56062 + "chroot_deny_mknod" is created.
56063 +
56064 +config GRKERNSEC_CHROOT_SHMAT
56065 + bool "Deny shmat() out of chroot"
56066 + depends on GRKERNSEC_CHROOT
56067 + help
56068 + If you say Y here, processes inside a chroot will not be able to attach
56069 + to shared memory segments that were created outside of the chroot jail.
56070 + It is recommended that you say Y here. If the sysctl option is enabled,
56071 + a sysctl option with name "chroot_deny_shmat" is created.
56072 +
56073 +config GRKERNSEC_CHROOT_UNIX
56074 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56075 + depends on GRKERNSEC_CHROOT
56076 + help
56077 + If you say Y here, processes inside a chroot will not be able to
56078 + connect to abstract (meaning not belonging to a filesystem) Unix
56079 + domain sockets that were bound outside of a chroot. It is recommended
56080 + that you say Y here. If the sysctl option is enabled, a sysctl option
56081 + with name "chroot_deny_unix" is created.
56082 +
56083 +config GRKERNSEC_CHROOT_FINDTASK
56084 + bool "Protect outside processes"
56085 + depends on GRKERNSEC_CHROOT
56086 + help
56087 + If you say Y here, processes inside a chroot will not be able to
56088 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56089 + getsid, or view any process outside of the chroot. If the sysctl
56090 + option is enabled, a sysctl option with name "chroot_findtask" is
56091 + created.
56092 +
56093 +config GRKERNSEC_CHROOT_NICE
56094 + bool "Restrict priority changes"
56095 + depends on GRKERNSEC_CHROOT
56096 + help
56097 + If you say Y here, processes inside a chroot will not be able to raise
56098 + the priority of processes in the chroot, or alter the priority of
56099 + processes outside the chroot. This provides more security than simply
56100 + removing CAP_SYS_NICE from the process' capability set. If the
56101 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56102 + is created.
56103 +
56104 +config GRKERNSEC_CHROOT_SYSCTL
56105 + bool "Deny sysctl writes"
56106 + depends on GRKERNSEC_CHROOT
56107 + help
56108 + If you say Y here, an attacker in a chroot will not be able to
56109 + write to sysctl entries, either by sysctl(2) or through a /proc
56110 + interface. It is strongly recommended that you say Y here. If the
56111 + sysctl option is enabled, a sysctl option with name
56112 + "chroot_deny_sysctl" is created.
56113 +
56114 +config GRKERNSEC_CHROOT_CAPS
56115 + bool "Capability restrictions"
56116 + depends on GRKERNSEC_CHROOT
56117 + help
56118 + If you say Y here, the capabilities on all processes within a
56119 + chroot jail will be lowered to stop module insertion, raw i/o,
56120 + system and net admin tasks, rebooting the system, modifying immutable
56121 + files, modifying IPC owned by another, and changing the system time.
56122 + This is left an option because it can break some apps. Disable this
56123 + if your chrooted apps are having problems performing those kinds of
56124 + tasks. If the sysctl option is enabled, a sysctl option with
56125 + name "chroot_caps" is created.
56126 +
56127 +endmenu
56128 +menu "Kernel Auditing"
56129 +depends on GRKERNSEC
56130 +
56131 +config GRKERNSEC_AUDIT_GROUP
56132 + bool "Single group for auditing"
56133 + help
56134 + If you say Y here, the exec, chdir, and (un)mount logging features
56135 + will only operate on a group you specify. This option is recommended
56136 + if you only want to watch certain users instead of having a large
56137 + amount of logs from the entire system. If the sysctl option is enabled,
56138 + a sysctl option with name "audit_group" is created.
56139 +
56140 +config GRKERNSEC_AUDIT_GID
56141 + int "GID for auditing"
56142 + depends on GRKERNSEC_AUDIT_GROUP
56143 + default 1007
56144 +
56145 +config GRKERNSEC_EXECLOG
56146 + bool "Exec logging"
56147 + help
56148 + If you say Y here, all execve() calls will be logged (since the
56149 + other exec*() calls are frontends to execve(), all execution
56150 + will be logged). Useful for shell-servers that like to keep track
56151 + of their users. If the sysctl option is enabled, a sysctl option with
56152 + name "exec_logging" is created.
56153 + WARNING: This option when enabled will produce a LOT of logs, especially
56154 + on an active system.
56155 +
56156 +config GRKERNSEC_RESLOG
56157 + bool "Resource logging"
56158 + help
56159 + If you say Y here, all attempts to overstep resource limits will
56160 + be logged with the resource name, the requested size, and the current
56161 + limit. It is highly recommended that you say Y here. If the sysctl
56162 + option is enabled, a sysctl option with name "resource_logging" is
56163 + created. If the RBAC system is enabled, the sysctl value is ignored.
56164 +
56165 +config GRKERNSEC_CHROOT_EXECLOG
56166 + bool "Log execs within chroot"
56167 + help
56168 + If you say Y here, all executions inside a chroot jail will be logged
56169 + to syslog. This can cause a large amount of logs if certain
56170 + applications (eg. djb's daemontools) are installed on the system, and
56171 + is therefore left as an option. If the sysctl option is enabled, a
56172 + sysctl option with name "chroot_execlog" is created.
56173 +
56174 +config GRKERNSEC_AUDIT_PTRACE
56175 + bool "Ptrace logging"
56176 + help
56177 + If you say Y here, all attempts to attach to a process via ptrace
56178 + will be logged. If the sysctl option is enabled, a sysctl option
56179 + with name "audit_ptrace" is created.
56180 +
56181 +config GRKERNSEC_AUDIT_CHDIR
56182 + bool "Chdir logging"
56183 + help
56184 + If you say Y here, all chdir() calls will be logged. If the sysctl
56185 + option is enabled, a sysctl option with name "audit_chdir" is created.
56186 +
56187 +config GRKERNSEC_AUDIT_MOUNT
56188 + bool "(Un)Mount logging"
56189 + help
56190 + If you say Y here, all mounts and unmounts will be logged. If the
56191 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56192 + created.
56193 +
56194 +config GRKERNSEC_SIGNAL
56195 + bool "Signal logging"
56196 + help
56197 + If you say Y here, certain important signals will be logged, such as
56198 + SIGSEGV, which will as a result inform you of when an error in a program
56199 + occurred, which in some cases could mean a possible exploit attempt.
56200 + If the sysctl option is enabled, a sysctl option with name
56201 + "signal_logging" is created.
56202 +
56203 +config GRKERNSEC_FORKFAIL
56204 + bool "Fork failure logging"
56205 + help
56206 + If you say Y here, all failed fork() attempts will be logged.
56207 + This could suggest a fork bomb, or someone attempting to overstep
56208 + their process limit. If the sysctl option is enabled, a sysctl option
56209 + with name "forkfail_logging" is created.
56210 +
56211 +config GRKERNSEC_TIME
56212 + bool "Time change logging"
56213 + help
56214 + If you say Y here, any changes of the system clock will be logged.
56215 + If the sysctl option is enabled, a sysctl option with name
56216 + "timechange_logging" is created.
56217 +
56218 +config GRKERNSEC_PROC_IPADDR
56219 + bool "/proc/<pid>/ipaddr support"
56220 + help
56221 + If you say Y here, a new entry will be added to each /proc/<pid>
56222 + directory that contains the IP address of the person using the task.
56223 + The IP is carried across local TCP and AF_UNIX stream sockets.
56224 + This information can be useful for IDS/IPSes to perform remote response
56225 + to a local attack. The entry is readable by only the owner of the
56226 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56227 + the RBAC system), and thus does not create privacy concerns.
56228 +
56229 +config GRKERNSEC_RWXMAP_LOG
56230 + bool 'Denied RWX mmap/mprotect logging'
56231 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56232 + help
56233 + If you say Y here, calls to mmap() and mprotect() with explicit
56234 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56235 + denied by the PAX_MPROTECT feature. If the sysctl option is
56236 + enabled, a sysctl option with name "rwxmap_logging" is created.
56237 +
56238 +config GRKERNSEC_AUDIT_TEXTREL
56239 + bool 'ELF text relocations logging (READ HELP)'
56240 + depends on PAX_MPROTECT
56241 + help
56242 + If you say Y here, text relocations will be logged with the filename
56243 + of the offending library or binary. The purpose of the feature is
56244 + to help Linux distribution developers get rid of libraries and
56245 + binaries that need text relocations which hinder the future progress
56246 + of PaX. Only Linux distribution developers should say Y here, and
56247 + never on a production machine, as this option creates an information
56248 + leak that could aid an attacker in defeating the randomization of
56249 + a single memory region. If the sysctl option is enabled, a sysctl
56250 + option with name "audit_textrel" is created.
56251 +
56252 +endmenu
56253 +
56254 +menu "Executable Protections"
56255 +depends on GRKERNSEC
56256 +
56257 +config GRKERNSEC_DMESG
56258 + bool "Dmesg(8) restriction"
56259 + help
56260 + If you say Y here, non-root users will not be able to use dmesg(8)
56261 + to view up to the last 4kb of messages in the kernel's log buffer.
56262 + The kernel's log buffer often contains kernel addresses and other
56263 + identifying information useful to an attacker in fingerprinting a
56264 + system for a targeted exploit.
56265 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56266 + created.
56267 +
56268 +config GRKERNSEC_HARDEN_PTRACE
56269 + bool "Deter ptrace-based process snooping"
56270 + help
56271 + If you say Y here, TTY sniffers and other malicious monitoring
56272 + programs implemented through ptrace will be defeated. If you
56273 + have been using the RBAC system, this option has already been
56274 + enabled for several years for all users, with the ability to make
56275 + fine-grained exceptions.
56276 +
56277 + This option only affects the ability of non-root users to ptrace
56278 + processes that are not a descendent of the ptracing process.
56279 + This means that strace ./binary and gdb ./binary will still work,
56280 + but attaching to arbitrary processes will not. If the sysctl
56281 + option is enabled, a sysctl option with name "harden_ptrace" is
56282 + created.
56283 +
56284 +config GRKERNSEC_PTRACE_READEXEC
56285 + bool "Require read access to ptrace sensitive binaries"
56286 + help
56287 + If you say Y here, unprivileged users will not be able to ptrace unreadable
56288 + binaries. This option is useful in environments that
56289 + remove the read bits (e.g. file mode 4711) from suid binaries to
56290 + prevent infoleaking of their contents. This option adds
56291 + consistency to the use of that file mode, as the binary could normally
56292 + be read out when run without privileges while ptracing.
56293 +
56294 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56295 + is created.
56296 +
56297 +config GRKERNSEC_SETXID
56298 + bool "Enforce consistent multithreaded privileges"
56299 + help
56300 + If you say Y here, a change from a root uid to a non-root uid
56301 + in a multithreaded application will cause the resulting uids,
56302 + gids, supplementary groups, and capabilities in that thread
56303 + to be propagated to the other threads of the process. In most
56304 + cases this is unnecessary, as glibc will emulate this behavior
56305 + on behalf of the application. Other libcs do not act in the
56306 + same way, allowing the other threads of the process to continue
56307 + running with root privileges. If the sysctl option is enabled,
56308 + a sysctl option with name "consistent_setxid" is created.
56309 +
56310 +config GRKERNSEC_TPE
56311 + bool "Trusted Path Execution (TPE)"
56312 + help
56313 + If you say Y here, you will be able to choose a gid to add to the
56314 + supplementary groups of users you want to mark as "untrusted."
56315 + These users will not be able to execute any files that are not in
56316 + root-owned directories writable only by root. If the sysctl option
56317 + is enabled, a sysctl option with name "tpe" is created.
56318 +
56319 +config GRKERNSEC_TPE_ALL
56320 + bool "Partially restrict all non-root users"
56321 + depends on GRKERNSEC_TPE
56322 + help
56323 + If you say Y here, all non-root users will be covered under
56324 + a weaker TPE restriction. This is separate from, and in addition to,
56325 + the main TPE options that you have selected elsewhere. Thus, if a
56326 + "trusted" GID is chosen, this restriction applies to even that GID.
56327 + Under this restriction, all non-root users will only be allowed to
56328 + execute files in directories they own that are not group or
56329 + world-writable, or in directories owned by root and writable only by
56330 + root. If the sysctl option is enabled, a sysctl option with name
56331 + "tpe_restrict_all" is created.
56332 +
56333 +config GRKERNSEC_TPE_INVERT
56334 + bool "Invert GID option"
56335 + depends on GRKERNSEC_TPE
56336 + help
56337 + If you say Y here, the group you specify in the TPE configuration will
56338 + decide what group TPE restrictions will be *disabled* for. This
56339 + option is useful if you want TPE restrictions to be applied to most
56340 + users on the system. If the sysctl option is enabled, a sysctl option
56341 + with name "tpe_invert" is created. Unlike other sysctl options, this
56342 + entry will default to on for backward-compatibility.
56343 +
56344 +config GRKERNSEC_TPE_GID
56345 + int "GID for untrusted users"
56346 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56347 + default 1005
56348 + help
56349 + Setting this GID determines what group TPE restrictions will be
56350 + *enabled* for. If the sysctl option is enabled, a sysctl option
56351 + with name "tpe_gid" is created.
56352 +
56353 +config GRKERNSEC_TPE_GID
56354 + int "GID for trusted users"
56355 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56356 + default 1005
56357 + help
56358 + Setting this GID determines what group TPE restrictions will be
56359 + *disabled* for. If the sysctl option is enabled, a sysctl option
56360 + with name "tpe_gid" is created.
56361 +
56362 +endmenu
56363 +menu "Network Protections"
56364 +depends on GRKERNSEC
56365 +
56366 +config GRKERNSEC_RANDNET
56367 + bool "Larger entropy pools"
56368 + help
56369 + If you say Y here, the entropy pools used for many features of Linux
56370 + and grsecurity will be doubled in size. Since several grsecurity
56371 + features use additional randomness, it is recommended that you say Y
56372 + here. Saying Y here has a similar effect as modifying
56373 + /proc/sys/kernel/random/poolsize.
56374 +
56375 +config GRKERNSEC_BLACKHOLE
56376 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56377 + depends on NET
56378 + help
56379 + If you say Y here, neither TCP resets nor ICMP
56380 + destination-unreachable packets will be sent in response to packets
56381 + sent to ports for which no associated listening process exists.
56382 + This feature supports both IPV4 and IPV6 and exempts the
56383 + loopback interface from blackholing. Enabling this feature
56384 + makes a host more resilient to DoS attacks and reduces network
56385 + visibility against scanners.
56386 +
56387 + The blackhole feature as-implemented is equivalent to the FreeBSD
56388 + blackhole feature, as it prevents RST responses to all packets, not
56389 + just SYNs. Under most application behavior this causes no
56390 + problems, but applications (like haproxy) may not close certain
56391 + connections in a way that cleanly terminates them on the remote
56392 + end, leaving the remote host in LAST_ACK state. Because of this
56393 + side-effect and to prevent intentional LAST_ACK DoSes, this
56394 + feature also adds automatic mitigation against such attacks.
56395 + The mitigation drastically reduces the amount of time a socket
56396 + can spend in LAST_ACK state. If you're using haproxy and not
56397 + all servers it connects to have this option enabled, consider
56398 + disabling this feature on the haproxy host.
56399 +
56400 + If the sysctl option is enabled, two sysctl options with names
56401 + "ip_blackhole" and "lastack_retries" will be created.
56402 + While "ip_blackhole" takes the standard zero/non-zero on/off
56403 + toggle, "lastack_retries" uses the same kinds of values as
56404 + "tcp_retries1" and "tcp_retries2". The default value of 4
56405 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56406 + state.
56407 +
56408 +config GRKERNSEC_SOCKET
56409 + bool "Socket restrictions"
56410 + depends on NET
56411 + help
56412 + If you say Y here, you will be able to choose from several options.
56413 + If you assign a GID on your system and add it to the supplementary
56414 + groups of users you want to restrict socket access to, this patch
56415 + will perform up to three things, based on the option(s) you choose.
56416 +
56417 +config GRKERNSEC_SOCKET_ALL
56418 + bool "Deny any sockets to group"
56419 + depends on GRKERNSEC_SOCKET
56420 + help
56421 + If you say Y here, you will be able to choose a GID of whose users will
56422 + be unable to connect to other hosts from your machine or run server
56423 + applications from your machine. If the sysctl option is enabled, a
56424 + sysctl option with name "socket_all" is created.
56425 +
56426 +config GRKERNSEC_SOCKET_ALL_GID
56427 + int "GID to deny all sockets for"
56428 + depends on GRKERNSEC_SOCKET_ALL
56429 + default 1004
56430 + help
56431 + Here you can choose the GID to disable socket access for. Remember to
56432 + add the users you want socket access disabled for to the GID
56433 + specified here. If the sysctl option is enabled, a sysctl option
56434 + with name "socket_all_gid" is created.
56435 +
56436 +config GRKERNSEC_SOCKET_CLIENT
56437 + bool "Deny client sockets to group"
56438 + depends on GRKERNSEC_SOCKET
56439 + help
56440 + If you say Y here, you will be able to choose a GID of whose users will
56441 + be unable to connect to other hosts from your machine, but will be
56442 + able to run servers. If this option is enabled, all users in the group
56443 + you specify will have to use passive mode when initiating ftp transfers
56444 + from the shell on your machine. If the sysctl option is enabled, a
56445 + sysctl option with name "socket_client" is created.
56446 +
56447 +config GRKERNSEC_SOCKET_CLIENT_GID
56448 + int "GID to deny client sockets for"
56449 + depends on GRKERNSEC_SOCKET_CLIENT
56450 + default 1003
56451 + help
56452 + Here you can choose the GID to disable client socket access for.
56453 + Remember to add the users you want client socket access disabled for to
56454 + the GID specified here. If the sysctl option is enabled, a sysctl
56455 + option with name "socket_client_gid" is created.
56456 +
56457 +config GRKERNSEC_SOCKET_SERVER
56458 + bool "Deny server sockets to group"
56459 + depends on GRKERNSEC_SOCKET
56460 + help
56461 + If you say Y here, you will be able to choose a GID of whose users will
56462 + be unable to run server applications from your machine. If the sysctl
56463 + option is enabled, a sysctl option with name "socket_server" is created.
56464 +
56465 +config GRKERNSEC_SOCKET_SERVER_GID
56466 + int "GID to deny server sockets for"
56467 + depends on GRKERNSEC_SOCKET_SERVER
56468 + default 1002
56469 + help
56470 + Here you can choose the GID to disable server socket access for.
56471 + Remember to add the users you want server socket access disabled for to
56472 + the GID specified here. If the sysctl option is enabled, a sysctl
56473 + option with name "socket_server_gid" is created.
56474 +
56475 +endmenu
56476 +menu "Sysctl support"
56477 +depends on GRKERNSEC && SYSCTL
56478 +
56479 +config GRKERNSEC_SYSCTL
56480 + bool "Sysctl support"
56481 + help
56482 + If you say Y here, you will be able to change the options that
56483 + grsecurity runs with at bootup, without having to recompile your
56484 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56485 + to enable (1) or disable (0) various features. All the sysctl entries
56486 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56487 + All features enabled in the kernel configuration are disabled at boot
56488 + if you do not say Y to the "Turn on features by default" option.
56489 + All options should be set at startup, and the grsec_lock entry should
56490 + be set to a non-zero value after all the options are set.
56491 + *THIS IS EXTREMELY IMPORTANT*
56492 +
56493 +config GRKERNSEC_SYSCTL_DISTRO
56494 + bool "Extra sysctl support for distro makers (READ HELP)"
56495 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56496 + help
56497 + If you say Y here, additional sysctl options will be created
56498 + for features that affect processes running as root. Therefore,
56499 + it is critical when using this option that the grsec_lock entry be
56500 + enabled after boot. Only distros with prebuilt kernel packages
56501 + with this option enabled that can ensure grsec_lock is enabled
56502 + after boot should use this option.
56503 + *Failure to set grsec_lock after boot makes all grsec features
56504 + this option covers useless*
56505 +
56506 + Currently this option creates the following sysctl entries:
56507 + "Disable Privileged I/O": "disable_priv_io"
56508 +
56509 +config GRKERNSEC_SYSCTL_ON
56510 + bool "Turn on features by default"
56511 + depends on GRKERNSEC_SYSCTL
56512 + help
56513 + If you say Y here, instead of having all features enabled in the
56514 + kernel configuration disabled at boot time, the features will be
56515 + enabled at boot time. It is recommended you say Y here unless
56516 + there is some reason you would want all sysctl-tunable features to
56517 + be disabled by default. As mentioned elsewhere, it is important
56518 + to enable the grsec_lock entry once you have finished modifying
56519 + the sysctl entries.
56520 +
56521 +endmenu
56522 +menu "Logging Options"
56523 +depends on GRKERNSEC
56524 +
56525 +config GRKERNSEC_FLOODTIME
56526 + int "Seconds in between log messages (minimum)"
56527 + default 10
56528 + help
56529 + This option allows you to enforce the number of seconds between
56530 + grsecurity log messages. The default should be suitable for most
56531 + people, however, if you choose to change it, choose a value small enough
56532 + to allow informative logs to be produced, but large enough to
56533 + prevent flooding.
56534 +
56535 +config GRKERNSEC_FLOODBURST
56536 + int "Number of messages in a burst (maximum)"
56537 + default 6
56538 + help
56539 + This option allows you to choose the maximum number of messages allowed
56540 + within the flood time interval you chose in a separate option. The
56541 + default should be suitable for most people, however if you find that
56542 + many of your logs are being interpreted as flooding, you may want to
56543 + raise this value.
56544 +
56545 +endmenu
56546 +
56547 +endmenu
56548 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56549 new file mode 100644
56550 index 0000000..1b9afa9
56551 --- /dev/null
56552 +++ b/grsecurity/Makefile
56553 @@ -0,0 +1,38 @@
56554 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56555 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56556 +# into an RBAC system
56557 +#
56558 +# All code in this directory and various hooks inserted throughout the kernel
56559 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56560 +# under the GPL v2 or higher
56561 +
56562 +KBUILD_CFLAGS += -Werror
56563 +
56564 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56565 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56566 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56567 +
56568 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56569 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56570 + gracl_learn.o grsec_log.o
56571 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56572 +
56573 +ifdef CONFIG_NET
56574 +obj-y += grsec_sock.o
56575 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56576 +endif
56577 +
56578 +ifndef CONFIG_GRKERNSEC
56579 +obj-y += grsec_disabled.o
56580 +endif
56581 +
56582 +ifdef CONFIG_GRKERNSEC_HIDESYM
56583 +extra-y := grsec_hidesym.o
56584 +$(obj)/grsec_hidesym.o:
56585 + @-chmod -f 500 /boot
56586 + @-chmod -f 500 /lib/modules
56587 + @-chmod -f 500 /lib64/modules
56588 + @-chmod -f 500 /lib32/modules
56589 + @-chmod -f 700 .
56590 + @echo ' grsec: protected kernel image paths'
56591 +endif
56592 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56593 new file mode 100644
56594 index 0000000..dc4812b
56595 --- /dev/null
56596 +++ b/grsecurity/gracl.c
56597 @@ -0,0 +1,4148 @@
56598 +#include <linux/kernel.h>
56599 +#include <linux/module.h>
56600 +#include <linux/sched.h>
56601 +#include <linux/mm.h>
56602 +#include <linux/file.h>
56603 +#include <linux/fs.h>
56604 +#include <linux/namei.h>
56605 +#include <linux/mount.h>
56606 +#include <linux/tty.h>
56607 +#include <linux/proc_fs.h>
56608 +#include <linux/smp_lock.h>
56609 +#include <linux/slab.h>
56610 +#include <linux/vmalloc.h>
56611 +#include <linux/types.h>
56612 +#include <linux/sysctl.h>
56613 +#include <linux/netdevice.h>
56614 +#include <linux/ptrace.h>
56615 +#include <linux/gracl.h>
56616 +#include <linux/gralloc.h>
56617 +#include <linux/security.h>
56618 +#include <linux/grinternal.h>
56619 +#include <linux/pid_namespace.h>
56620 +#include <linux/fdtable.h>
56621 +#include <linux/percpu.h>
56622 +
56623 +#include <asm/uaccess.h>
56624 +#include <asm/errno.h>
56625 +#include <asm/mman.h>
56626 +
56627 +static struct acl_role_db acl_role_set;
56628 +static struct name_db name_set;
56629 +static struct inodev_db inodev_set;
56630 +
56631 +/* for keeping track of userspace pointers used for subjects, so we
56632 + can share references in the kernel as well
56633 +*/
56634 +
56635 +static struct dentry *real_root;
56636 +static struct vfsmount *real_root_mnt;
56637 +
56638 +static struct acl_subj_map_db subj_map_set;
56639 +
56640 +static struct acl_role_label *default_role;
56641 +
56642 +static struct acl_role_label *role_list;
56643 +
56644 +static u16 acl_sp_role_value;
56645 +
56646 +extern char *gr_shared_page[4];
56647 +static DEFINE_MUTEX(gr_dev_mutex);
56648 +DEFINE_RWLOCK(gr_inode_lock);
56649 +
56650 +struct gr_arg *gr_usermode;
56651 +
56652 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
56653 +
56654 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56655 +extern void gr_clear_learn_entries(void);
56656 +
56657 +#ifdef CONFIG_GRKERNSEC_RESLOG
56658 +extern void gr_log_resource(const struct task_struct *task,
56659 + const int res, const unsigned long wanted, const int gt);
56660 +#endif
56661 +
56662 +unsigned char *gr_system_salt;
56663 +unsigned char *gr_system_sum;
56664 +
56665 +static struct sprole_pw **acl_special_roles = NULL;
56666 +static __u16 num_sprole_pws = 0;
56667 +
56668 +static struct acl_role_label *kernel_role = NULL;
56669 +
56670 +static unsigned int gr_auth_attempts = 0;
56671 +static unsigned long gr_auth_expires = 0UL;
56672 +
56673 +#ifdef CONFIG_NET
56674 +extern struct vfsmount *sock_mnt;
56675 +#endif
56676 +extern struct vfsmount *pipe_mnt;
56677 +extern struct vfsmount *shm_mnt;
56678 +#ifdef CONFIG_HUGETLBFS
56679 +extern struct vfsmount *hugetlbfs_vfsmount;
56680 +#endif
56681 +
56682 +static struct acl_object_label *fakefs_obj_rw;
56683 +static struct acl_object_label *fakefs_obj_rwx;
56684 +
56685 +extern int gr_init_uidset(void);
56686 +extern void gr_free_uidset(void);
56687 +extern void gr_remove_uid(uid_t uid);
56688 +extern int gr_find_uid(uid_t uid);
56689 +
56690 +__inline__ int
56691 +gr_acl_is_enabled(void)
56692 +{
56693 + return (gr_status & GR_READY);
56694 +}
56695 +
56696 +#ifdef CONFIG_BTRFS_FS
56697 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56698 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56699 +#endif
56700 +
56701 +static inline dev_t __get_dev(const struct dentry *dentry)
56702 +{
56703 +#ifdef CONFIG_BTRFS_FS
56704 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56705 + return get_btrfs_dev_from_inode(dentry->d_inode);
56706 + else
56707 +#endif
56708 + return dentry->d_inode->i_sb->s_dev;
56709 +}
56710 +
56711 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56712 +{
56713 + return __get_dev(dentry);
56714 +}
56715 +
56716 +static char gr_task_roletype_to_char(struct task_struct *task)
56717 +{
56718 + switch (task->role->roletype &
56719 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56720 + GR_ROLE_SPECIAL)) {
56721 + case GR_ROLE_DEFAULT:
56722 + return 'D';
56723 + case GR_ROLE_USER:
56724 + return 'U';
56725 + case GR_ROLE_GROUP:
56726 + return 'G';
56727 + case GR_ROLE_SPECIAL:
56728 + return 'S';
56729 + }
56730 +
56731 + return 'X';
56732 +}
56733 +
56734 +char gr_roletype_to_char(void)
56735 +{
56736 + return gr_task_roletype_to_char(current);
56737 +}
56738 +
56739 +__inline__ int
56740 +gr_acl_tpe_check(void)
56741 +{
56742 + if (unlikely(!(gr_status & GR_READY)))
56743 + return 0;
56744 + if (current->role->roletype & GR_ROLE_TPE)
56745 + return 1;
56746 + else
56747 + return 0;
56748 +}
56749 +
56750 +int
56751 +gr_handle_rawio(const struct inode *inode)
56752 +{
56753 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56754 + if (inode && S_ISBLK(inode->i_mode) &&
56755 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56756 + !capable(CAP_SYS_RAWIO))
56757 + return 1;
56758 +#endif
56759 + return 0;
56760 +}
56761 +
56762 +static int
56763 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56764 +{
56765 + if (likely(lena != lenb))
56766 + return 0;
56767 +
56768 + return !memcmp(a, b, lena);
56769 +}
56770 +
56771 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56772 +{
56773 + *buflen -= namelen;
56774 + if (*buflen < 0)
56775 + return -ENAMETOOLONG;
56776 + *buffer -= namelen;
56777 + memcpy(*buffer, str, namelen);
56778 + return 0;
56779 +}
56780 +
56781 +/* this must be called with vfsmount_lock and dcache_lock held */
56782 +
56783 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56784 + struct dentry *root, struct vfsmount *rootmnt,
56785 + char *buffer, int buflen)
56786 +{
56787 + char * end = buffer+buflen;
56788 + char * retval;
56789 + int namelen;
56790 +
56791 + *--end = '\0';
56792 + buflen--;
56793 +
56794 + if (buflen < 1)
56795 + goto Elong;
56796 + /* Get '/' right */
56797 + retval = end-1;
56798 + *retval = '/';
56799 +
56800 + for (;;) {
56801 + struct dentry * parent;
56802 +
56803 + if (dentry == root && vfsmnt == rootmnt)
56804 + break;
56805 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56806 + /* Global root? */
56807 + if (vfsmnt->mnt_parent == vfsmnt)
56808 + goto global_root;
56809 + dentry = vfsmnt->mnt_mountpoint;
56810 + vfsmnt = vfsmnt->mnt_parent;
56811 + continue;
56812 + }
56813 + parent = dentry->d_parent;
56814 + prefetch(parent);
56815 + namelen = dentry->d_name.len;
56816 + buflen -= namelen + 1;
56817 + if (buflen < 0)
56818 + goto Elong;
56819 + end -= namelen;
56820 + memcpy(end, dentry->d_name.name, namelen);
56821 + *--end = '/';
56822 + retval = end;
56823 + dentry = parent;
56824 + }
56825 +
56826 +out:
56827 + return retval;
56828 +
56829 +global_root:
56830 + namelen = dentry->d_name.len;
56831 + buflen -= namelen;
56832 + if (buflen < 0)
56833 + goto Elong;
56834 + retval -= namelen-1; /* hit the slash */
56835 + memcpy(retval, dentry->d_name.name, namelen);
56836 + goto out;
56837 +Elong:
56838 + retval = ERR_PTR(-ENAMETOOLONG);
56839 + goto out;
56840 +}
56841 +
56842 +static char *
56843 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56844 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56845 +{
56846 + char *retval;
56847 +
56848 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56849 + if (unlikely(IS_ERR(retval)))
56850 + retval = strcpy(buf, "<path too long>");
56851 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56852 + retval[1] = '\0';
56853 +
56854 + return retval;
56855 +}
56856 +
56857 +static char *
56858 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56859 + char *buf, int buflen)
56860 +{
56861 + char *res;
56862 +
56863 + /* we can use real_root, real_root_mnt, because this is only called
56864 + by the RBAC system */
56865 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56866 +
56867 + return res;
56868 +}
56869 +
56870 +static char *
56871 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56872 + char *buf, int buflen)
56873 +{
56874 + char *res;
56875 + struct dentry *root;
56876 + struct vfsmount *rootmnt;
56877 + struct task_struct *reaper = &init_task;
56878 +
56879 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56880 + read_lock(&reaper->fs->lock);
56881 + root = dget(reaper->fs->root.dentry);
56882 + rootmnt = mntget(reaper->fs->root.mnt);
56883 + read_unlock(&reaper->fs->lock);
56884 +
56885 + spin_lock(&dcache_lock);
56886 + spin_lock(&vfsmount_lock);
56887 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56888 + spin_unlock(&vfsmount_lock);
56889 + spin_unlock(&dcache_lock);
56890 +
56891 + dput(root);
56892 + mntput(rootmnt);
56893 + return res;
56894 +}
56895 +
56896 +static char *
56897 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56898 +{
56899 + char *ret;
56900 + spin_lock(&dcache_lock);
56901 + spin_lock(&vfsmount_lock);
56902 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56903 + PAGE_SIZE);
56904 + spin_unlock(&vfsmount_lock);
56905 + spin_unlock(&dcache_lock);
56906 + return ret;
56907 +}
56908 +
56909 +static char *
56910 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56911 +{
56912 + char *ret;
56913 + char *buf;
56914 + int buflen;
56915 +
56916 + spin_lock(&dcache_lock);
56917 + spin_lock(&vfsmount_lock);
56918 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56919 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56920 + buflen = (int)(ret - buf);
56921 + if (buflen >= 5)
56922 + prepend(&ret, &buflen, "/proc", 5);
56923 + else
56924 + ret = strcpy(buf, "<path too long>");
56925 + spin_unlock(&vfsmount_lock);
56926 + spin_unlock(&dcache_lock);
56927 + return ret;
56928 +}
56929 +
56930 +char *
56931 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56932 +{
56933 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56934 + PAGE_SIZE);
56935 +}
56936 +
56937 +char *
56938 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56939 +{
56940 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56941 + PAGE_SIZE);
56942 +}
56943 +
56944 +char *
56945 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56946 +{
56947 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56948 + PAGE_SIZE);
56949 +}
56950 +
56951 +char *
56952 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56953 +{
56954 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56955 + PAGE_SIZE);
56956 +}
56957 +
56958 +char *
56959 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56960 +{
56961 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56962 + PAGE_SIZE);
56963 +}
56964 +
56965 +__inline__ __u32
56966 +to_gr_audit(const __u32 reqmode)
56967 +{
56968 + /* masks off auditable permission flags, then shifts them to create
56969 + auditing flags, and adds the special case of append auditing if
56970 + we're requesting write */
56971 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56972 +}
56973 +
56974 +struct acl_subject_label *
56975 +lookup_subject_map(const struct acl_subject_label *userp)
56976 +{
56977 + unsigned int index = shash(userp, subj_map_set.s_size);
56978 + struct subject_map *match;
56979 +
56980 + match = subj_map_set.s_hash[index];
56981 +
56982 + while (match && match->user != userp)
56983 + match = match->next;
56984 +
56985 + if (match != NULL)
56986 + return match->kernel;
56987 + else
56988 + return NULL;
56989 +}
56990 +
56991 +static void
56992 +insert_subj_map_entry(struct subject_map *subjmap)
56993 +{
56994 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56995 + struct subject_map **curr;
56996 +
56997 + subjmap->prev = NULL;
56998 +
56999 + curr = &subj_map_set.s_hash[index];
57000 + if (*curr != NULL)
57001 + (*curr)->prev = subjmap;
57002 +
57003 + subjmap->next = *curr;
57004 + *curr = subjmap;
57005 +
57006 + return;
57007 +}
57008 +
57009 +static struct acl_role_label *
57010 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57011 + const gid_t gid)
57012 +{
57013 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57014 + struct acl_role_label *match;
57015 + struct role_allowed_ip *ipp;
57016 + unsigned int x;
57017 + u32 curr_ip = task->signal->curr_ip;
57018 +
57019 + task->signal->saved_ip = curr_ip;
57020 +
57021 + match = acl_role_set.r_hash[index];
57022 +
57023 + while (match) {
57024 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57025 + for (x = 0; x < match->domain_child_num; x++) {
57026 + if (match->domain_children[x] == uid)
57027 + goto found;
57028 + }
57029 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57030 + break;
57031 + match = match->next;
57032 + }
57033 +found:
57034 + if (match == NULL) {
57035 + try_group:
57036 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57037 + match = acl_role_set.r_hash[index];
57038 +
57039 + while (match) {
57040 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57041 + for (x = 0; x < match->domain_child_num; x++) {
57042 + if (match->domain_children[x] == gid)
57043 + goto found2;
57044 + }
57045 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57046 + break;
57047 + match = match->next;
57048 + }
57049 +found2:
57050 + if (match == NULL)
57051 + match = default_role;
57052 + if (match->allowed_ips == NULL)
57053 + return match;
57054 + else {
57055 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57056 + if (likely
57057 + ((ntohl(curr_ip) & ipp->netmask) ==
57058 + (ntohl(ipp->addr) & ipp->netmask)))
57059 + return match;
57060 + }
57061 + match = default_role;
57062 + }
57063 + } else if (match->allowed_ips == NULL) {
57064 + return match;
57065 + } else {
57066 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57067 + if (likely
57068 + ((ntohl(curr_ip) & ipp->netmask) ==
57069 + (ntohl(ipp->addr) & ipp->netmask)))
57070 + return match;
57071 + }
57072 + goto try_group;
57073 + }
57074 +
57075 + return match;
57076 +}
57077 +
57078 +struct acl_subject_label *
57079 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57080 + const struct acl_role_label *role)
57081 +{
57082 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
57083 + struct acl_subject_label *match;
57084 +
57085 + match = role->subj_hash[index];
57086 +
57087 + while (match && (match->inode != ino || match->device != dev ||
57088 + (match->mode & GR_DELETED))) {
57089 + match = match->next;
57090 + }
57091 +
57092 + if (match && !(match->mode & GR_DELETED))
57093 + return match;
57094 + else
57095 + return NULL;
57096 +}
57097 +
57098 +struct acl_subject_label *
57099 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57100 + const struct acl_role_label *role)
57101 +{
57102 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
57103 + struct acl_subject_label *match;
57104 +
57105 + match = role->subj_hash[index];
57106 +
57107 + while (match && (match->inode != ino || match->device != dev ||
57108 + !(match->mode & GR_DELETED))) {
57109 + match = match->next;
57110 + }
57111 +
57112 + if (match && (match->mode & GR_DELETED))
57113 + return match;
57114 + else
57115 + return NULL;
57116 +}
57117 +
57118 +static struct acl_object_label *
57119 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57120 + const struct acl_subject_label *subj)
57121 +{
57122 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57123 + struct acl_object_label *match;
57124 +
57125 + match = subj->obj_hash[index];
57126 +
57127 + while (match && (match->inode != ino || match->device != dev ||
57128 + (match->mode & GR_DELETED))) {
57129 + match = match->next;
57130 + }
57131 +
57132 + if (match && !(match->mode & GR_DELETED))
57133 + return match;
57134 + else
57135 + return NULL;
57136 +}
57137 +
57138 +static struct acl_object_label *
57139 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57140 + const struct acl_subject_label *subj)
57141 +{
57142 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57143 + struct acl_object_label *match;
57144 +
57145 + match = subj->obj_hash[index];
57146 +
57147 + while (match && (match->inode != ino || match->device != dev ||
57148 + !(match->mode & GR_DELETED))) {
57149 + match = match->next;
57150 + }
57151 +
57152 + if (match && (match->mode & GR_DELETED))
57153 + return match;
57154 +
57155 + match = subj->obj_hash[index];
57156 +
57157 + while (match && (match->inode != ino || match->device != dev ||
57158 + (match->mode & GR_DELETED))) {
57159 + match = match->next;
57160 + }
57161 +
57162 + if (match && !(match->mode & GR_DELETED))
57163 + return match;
57164 + else
57165 + return NULL;
57166 +}
57167 +
57168 +static struct name_entry *
57169 +lookup_name_entry(const char *name)
57170 +{
57171 + unsigned int len = strlen(name);
57172 + unsigned int key = full_name_hash(name, len);
57173 + unsigned int index = key % name_set.n_size;
57174 + struct name_entry *match;
57175 +
57176 + match = name_set.n_hash[index];
57177 +
57178 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57179 + match = match->next;
57180 +
57181 + return match;
57182 +}
57183 +
57184 +static struct name_entry *
57185 +lookup_name_entry_create(const char *name)
57186 +{
57187 + unsigned int len = strlen(name);
57188 + unsigned int key = full_name_hash(name, len);
57189 + unsigned int index = key % name_set.n_size;
57190 + struct name_entry *match;
57191 +
57192 + match = name_set.n_hash[index];
57193 +
57194 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57195 + !match->deleted))
57196 + match = match->next;
57197 +
57198 + if (match && match->deleted)
57199 + return match;
57200 +
57201 + match = name_set.n_hash[index];
57202 +
57203 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57204 + match->deleted))
57205 + match = match->next;
57206 +
57207 + if (match && !match->deleted)
57208 + return match;
57209 + else
57210 + return NULL;
57211 +}
57212 +
57213 +static struct inodev_entry *
57214 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
57215 +{
57216 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
57217 + struct inodev_entry *match;
57218 +
57219 + match = inodev_set.i_hash[index];
57220 +
57221 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57222 + match = match->next;
57223 +
57224 + return match;
57225 +}
57226 +
57227 +static void
57228 +insert_inodev_entry(struct inodev_entry *entry)
57229 +{
57230 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57231 + inodev_set.i_size);
57232 + struct inodev_entry **curr;
57233 +
57234 + entry->prev = NULL;
57235 +
57236 + curr = &inodev_set.i_hash[index];
57237 + if (*curr != NULL)
57238 + (*curr)->prev = entry;
57239 +
57240 + entry->next = *curr;
57241 + *curr = entry;
57242 +
57243 + return;
57244 +}
57245 +
57246 +static void
57247 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57248 +{
57249 + unsigned int index =
57250 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57251 + struct acl_role_label **curr;
57252 + struct acl_role_label *tmp;
57253 +
57254 + curr = &acl_role_set.r_hash[index];
57255 +
57256 + /* if role was already inserted due to domains and already has
57257 + a role in the same bucket as it attached, then we need to
57258 + combine these two buckets
57259 + */
57260 + if (role->next) {
57261 + tmp = role->next;
57262 + while (tmp->next)
57263 + tmp = tmp->next;
57264 + tmp->next = *curr;
57265 + } else
57266 + role->next = *curr;
57267 + *curr = role;
57268 +
57269 + return;
57270 +}
57271 +
57272 +static void
57273 +insert_acl_role_label(struct acl_role_label *role)
57274 +{
57275 + int i;
57276 +
57277 + if (role_list == NULL) {
57278 + role_list = role;
57279 + role->prev = NULL;
57280 + } else {
57281 + role->prev = role_list;
57282 + role_list = role;
57283 + }
57284 +
57285 + /* used for hash chains */
57286 + role->next = NULL;
57287 +
57288 + if (role->roletype & GR_ROLE_DOMAIN) {
57289 + for (i = 0; i < role->domain_child_num; i++)
57290 + __insert_acl_role_label(role, role->domain_children[i]);
57291 + } else
57292 + __insert_acl_role_label(role, role->uidgid);
57293 +}
57294 +
57295 +static int
57296 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57297 +{
57298 + struct name_entry **curr, *nentry;
57299 + struct inodev_entry *ientry;
57300 + unsigned int len = strlen(name);
57301 + unsigned int key = full_name_hash(name, len);
57302 + unsigned int index = key % name_set.n_size;
57303 +
57304 + curr = &name_set.n_hash[index];
57305 +
57306 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57307 + curr = &((*curr)->next);
57308 +
57309 + if (*curr != NULL)
57310 + return 1;
57311 +
57312 + nentry = acl_alloc(sizeof (struct name_entry));
57313 + if (nentry == NULL)
57314 + return 0;
57315 + ientry = acl_alloc(sizeof (struct inodev_entry));
57316 + if (ientry == NULL)
57317 + return 0;
57318 + ientry->nentry = nentry;
57319 +
57320 + nentry->key = key;
57321 + nentry->name = name;
57322 + nentry->inode = inode;
57323 + nentry->device = device;
57324 + nentry->len = len;
57325 + nentry->deleted = deleted;
57326 +
57327 + nentry->prev = NULL;
57328 + curr = &name_set.n_hash[index];
57329 + if (*curr != NULL)
57330 + (*curr)->prev = nentry;
57331 + nentry->next = *curr;
57332 + *curr = nentry;
57333 +
57334 + /* insert us into the table searchable by inode/dev */
57335 + insert_inodev_entry(ientry);
57336 +
57337 + return 1;
57338 +}
57339 +
57340 +static void
57341 +insert_acl_obj_label(struct acl_object_label *obj,
57342 + struct acl_subject_label *subj)
57343 +{
57344 + unsigned int index =
57345 + fhash(obj->inode, obj->device, subj->obj_hash_size);
57346 + struct acl_object_label **curr;
57347 +
57348 +
57349 + obj->prev = NULL;
57350 +
57351 + curr = &subj->obj_hash[index];
57352 + if (*curr != NULL)
57353 + (*curr)->prev = obj;
57354 +
57355 + obj->next = *curr;
57356 + *curr = obj;
57357 +
57358 + return;
57359 +}
57360 +
57361 +static void
57362 +insert_acl_subj_label(struct acl_subject_label *obj,
57363 + struct acl_role_label *role)
57364 +{
57365 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57366 + struct acl_subject_label **curr;
57367 +
57368 + obj->prev = NULL;
57369 +
57370 + curr = &role->subj_hash[index];
57371 + if (*curr != NULL)
57372 + (*curr)->prev = obj;
57373 +
57374 + obj->next = *curr;
57375 + *curr = obj;
57376 +
57377 + return;
57378 +}
57379 +
57380 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57381 +
57382 +static void *
57383 +create_table(__u32 * len, int elementsize)
57384 +{
57385 + unsigned int table_sizes[] = {
57386 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57387 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57388 + 4194301, 8388593, 16777213, 33554393, 67108859
57389 + };
57390 + void *newtable = NULL;
57391 + unsigned int pwr = 0;
57392 +
57393 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57394 + table_sizes[pwr] <= *len)
57395 + pwr++;
57396 +
57397 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57398 + return newtable;
57399 +
57400 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57401 + newtable =
57402 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57403 + else
57404 + newtable = vmalloc(table_sizes[pwr] * elementsize);
57405 +
57406 + *len = table_sizes[pwr];
57407 +
57408 + return newtable;
57409 +}
57410 +
57411 +static int
57412 +init_variables(const struct gr_arg *arg)
57413 +{
57414 + struct task_struct *reaper = &init_task;
57415 + unsigned int stacksize;
57416 +
57417 + subj_map_set.s_size = arg->role_db.num_subjects;
57418 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57419 + name_set.n_size = arg->role_db.num_objects;
57420 + inodev_set.i_size = arg->role_db.num_objects;
57421 +
57422 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
57423 + !name_set.n_size || !inodev_set.i_size)
57424 + return 1;
57425 +
57426 + if (!gr_init_uidset())
57427 + return 1;
57428 +
57429 + /* set up the stack that holds allocation info */
57430 +
57431 + stacksize = arg->role_db.num_pointers + 5;
57432 +
57433 + if (!acl_alloc_stack_init(stacksize))
57434 + return 1;
57435 +
57436 + /* grab reference for the real root dentry and vfsmount */
57437 + read_lock(&reaper->fs->lock);
57438 + real_root = dget(reaper->fs->root.dentry);
57439 + real_root_mnt = mntget(reaper->fs->root.mnt);
57440 + read_unlock(&reaper->fs->lock);
57441 +
57442 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57443 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57444 +#endif
57445 +
57446 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57447 + if (fakefs_obj_rw == NULL)
57448 + return 1;
57449 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57450 +
57451 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57452 + if (fakefs_obj_rwx == NULL)
57453 + return 1;
57454 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57455 +
57456 + subj_map_set.s_hash =
57457 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57458 + acl_role_set.r_hash =
57459 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57460 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57461 + inodev_set.i_hash =
57462 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57463 +
57464 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57465 + !name_set.n_hash || !inodev_set.i_hash)
57466 + return 1;
57467 +
57468 + memset(subj_map_set.s_hash, 0,
57469 + sizeof(struct subject_map *) * subj_map_set.s_size);
57470 + memset(acl_role_set.r_hash, 0,
57471 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
57472 + memset(name_set.n_hash, 0,
57473 + sizeof (struct name_entry *) * name_set.n_size);
57474 + memset(inodev_set.i_hash, 0,
57475 + sizeof (struct inodev_entry *) * inodev_set.i_size);
57476 +
57477 + return 0;
57478 +}
57479 +
57480 +/* free information not needed after startup
57481 + currently contains user->kernel pointer mappings for subjects
57482 +*/
57483 +
57484 +static void
57485 +free_init_variables(void)
57486 +{
57487 + __u32 i;
57488 +
57489 + if (subj_map_set.s_hash) {
57490 + for (i = 0; i < subj_map_set.s_size; i++) {
57491 + if (subj_map_set.s_hash[i]) {
57492 + kfree(subj_map_set.s_hash[i]);
57493 + subj_map_set.s_hash[i] = NULL;
57494 + }
57495 + }
57496 +
57497 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57498 + PAGE_SIZE)
57499 + kfree(subj_map_set.s_hash);
57500 + else
57501 + vfree(subj_map_set.s_hash);
57502 + }
57503 +
57504 + return;
57505 +}
57506 +
57507 +static void
57508 +free_variables(void)
57509 +{
57510 + struct acl_subject_label *s;
57511 + struct acl_role_label *r;
57512 + struct task_struct *task, *task2;
57513 + unsigned int x;
57514 +
57515 + gr_clear_learn_entries();
57516 +
57517 + read_lock(&tasklist_lock);
57518 + do_each_thread(task2, task) {
57519 + task->acl_sp_role = 0;
57520 + task->acl_role_id = 0;
57521 + task->acl = NULL;
57522 + task->role = NULL;
57523 + } while_each_thread(task2, task);
57524 + read_unlock(&tasklist_lock);
57525 +
57526 + /* release the reference to the real root dentry and vfsmount */
57527 + if (real_root)
57528 + dput(real_root);
57529 + real_root = NULL;
57530 + if (real_root_mnt)
57531 + mntput(real_root_mnt);
57532 + real_root_mnt = NULL;
57533 +
57534 + /* free all object hash tables */
57535 +
57536 + FOR_EACH_ROLE_START(r)
57537 + if (r->subj_hash == NULL)
57538 + goto next_role;
57539 + FOR_EACH_SUBJECT_START(r, s, x)
57540 + if (s->obj_hash == NULL)
57541 + break;
57542 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57543 + kfree(s->obj_hash);
57544 + else
57545 + vfree(s->obj_hash);
57546 + FOR_EACH_SUBJECT_END(s, x)
57547 + FOR_EACH_NESTED_SUBJECT_START(r, s)
57548 + if (s->obj_hash == NULL)
57549 + break;
57550 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57551 + kfree(s->obj_hash);
57552 + else
57553 + vfree(s->obj_hash);
57554 + FOR_EACH_NESTED_SUBJECT_END(s)
57555 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57556 + kfree(r->subj_hash);
57557 + else
57558 + vfree(r->subj_hash);
57559 + r->subj_hash = NULL;
57560 +next_role:
57561 + FOR_EACH_ROLE_END(r)
57562 +
57563 + acl_free_all();
57564 +
57565 + if (acl_role_set.r_hash) {
57566 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57567 + PAGE_SIZE)
57568 + kfree(acl_role_set.r_hash);
57569 + else
57570 + vfree(acl_role_set.r_hash);
57571 + }
57572 + if (name_set.n_hash) {
57573 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
57574 + PAGE_SIZE)
57575 + kfree(name_set.n_hash);
57576 + else
57577 + vfree(name_set.n_hash);
57578 + }
57579 +
57580 + if (inodev_set.i_hash) {
57581 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57582 + PAGE_SIZE)
57583 + kfree(inodev_set.i_hash);
57584 + else
57585 + vfree(inodev_set.i_hash);
57586 + }
57587 +
57588 + gr_free_uidset();
57589 +
57590 + memset(&name_set, 0, sizeof (struct name_db));
57591 + memset(&inodev_set, 0, sizeof (struct inodev_db));
57592 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57593 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57594 +
57595 + default_role = NULL;
57596 + role_list = NULL;
57597 +
57598 + return;
57599 +}
57600 +
57601 +static __u32
57602 +count_user_objs(struct acl_object_label *userp)
57603 +{
57604 + struct acl_object_label o_tmp;
57605 + __u32 num = 0;
57606 +
57607 + while (userp) {
57608 + if (copy_from_user(&o_tmp, userp,
57609 + sizeof (struct acl_object_label)))
57610 + break;
57611 +
57612 + userp = o_tmp.prev;
57613 + num++;
57614 + }
57615 +
57616 + return num;
57617 +}
57618 +
57619 +static struct acl_subject_label *
57620 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57621 +
57622 +static int
57623 +copy_user_glob(struct acl_object_label *obj)
57624 +{
57625 + struct acl_object_label *g_tmp, **guser;
57626 + unsigned int len;
57627 + char *tmp;
57628 +
57629 + if (obj->globbed == NULL)
57630 + return 0;
57631 +
57632 + guser = &obj->globbed;
57633 + while (*guser) {
57634 + g_tmp = (struct acl_object_label *)
57635 + acl_alloc(sizeof (struct acl_object_label));
57636 + if (g_tmp == NULL)
57637 + return -ENOMEM;
57638 +
57639 + if (copy_from_user(g_tmp, *guser,
57640 + sizeof (struct acl_object_label)))
57641 + return -EFAULT;
57642 +
57643 + len = strnlen_user(g_tmp->filename, PATH_MAX);
57644 +
57645 + if (!len || len >= PATH_MAX)
57646 + return -EINVAL;
57647 +
57648 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57649 + return -ENOMEM;
57650 +
57651 + if (copy_from_user(tmp, g_tmp->filename, len))
57652 + return -EFAULT;
57653 + tmp[len-1] = '\0';
57654 + g_tmp->filename = tmp;
57655 +
57656 + *guser = g_tmp;
57657 + guser = &(g_tmp->next);
57658 + }
57659 +
57660 + return 0;
57661 +}
57662 +
57663 +static int
57664 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57665 + struct acl_role_label *role)
57666 +{
57667 + struct acl_object_label *o_tmp;
57668 + unsigned int len;
57669 + int ret;
57670 + char *tmp;
57671 +
57672 + while (userp) {
57673 + if ((o_tmp = (struct acl_object_label *)
57674 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
57675 + return -ENOMEM;
57676 +
57677 + if (copy_from_user(o_tmp, userp,
57678 + sizeof (struct acl_object_label)))
57679 + return -EFAULT;
57680 +
57681 + userp = o_tmp->prev;
57682 +
57683 + len = strnlen_user(o_tmp->filename, PATH_MAX);
57684 +
57685 + if (!len || len >= PATH_MAX)
57686 + return -EINVAL;
57687 +
57688 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57689 + return -ENOMEM;
57690 +
57691 + if (copy_from_user(tmp, o_tmp->filename, len))
57692 + return -EFAULT;
57693 + tmp[len-1] = '\0';
57694 + o_tmp->filename = tmp;
57695 +
57696 + insert_acl_obj_label(o_tmp, subj);
57697 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57698 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57699 + return -ENOMEM;
57700 +
57701 + ret = copy_user_glob(o_tmp);
57702 + if (ret)
57703 + return ret;
57704 +
57705 + if (o_tmp->nested) {
57706 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57707 + if (IS_ERR(o_tmp->nested))
57708 + return PTR_ERR(o_tmp->nested);
57709 +
57710 + /* insert into nested subject list */
57711 + o_tmp->nested->next = role->hash->first;
57712 + role->hash->first = o_tmp->nested;
57713 + }
57714 + }
57715 +
57716 + return 0;
57717 +}
57718 +
57719 +static __u32
57720 +count_user_subjs(struct acl_subject_label *userp)
57721 +{
57722 + struct acl_subject_label s_tmp;
57723 + __u32 num = 0;
57724 +
57725 + while (userp) {
57726 + if (copy_from_user(&s_tmp, userp,
57727 + sizeof (struct acl_subject_label)))
57728 + break;
57729 +
57730 + userp = s_tmp.prev;
57731 + /* do not count nested subjects against this count, since
57732 + they are not included in the hash table, but are
57733 + attached to objects. We have already counted
57734 + the subjects in userspace for the allocation
57735 + stack
57736 + */
57737 + if (!(s_tmp.mode & GR_NESTED))
57738 + num++;
57739 + }
57740 +
57741 + return num;
57742 +}
57743 +
57744 +static int
57745 +copy_user_allowedips(struct acl_role_label *rolep)
57746 +{
57747 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57748 +
57749 + ruserip = rolep->allowed_ips;
57750 +
57751 + while (ruserip) {
57752 + rlast = rtmp;
57753 +
57754 + if ((rtmp = (struct role_allowed_ip *)
57755 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57756 + return -ENOMEM;
57757 +
57758 + if (copy_from_user(rtmp, ruserip,
57759 + sizeof (struct role_allowed_ip)))
57760 + return -EFAULT;
57761 +
57762 + ruserip = rtmp->prev;
57763 +
57764 + if (!rlast) {
57765 + rtmp->prev = NULL;
57766 + rolep->allowed_ips = rtmp;
57767 + } else {
57768 + rlast->next = rtmp;
57769 + rtmp->prev = rlast;
57770 + }
57771 +
57772 + if (!ruserip)
57773 + rtmp->next = NULL;
57774 + }
57775 +
57776 + return 0;
57777 +}
57778 +
57779 +static int
57780 +copy_user_transitions(struct acl_role_label *rolep)
57781 +{
57782 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
57783 +
57784 + unsigned int len;
57785 + char *tmp;
57786 +
57787 + rusertp = rolep->transitions;
57788 +
57789 + while (rusertp) {
57790 + rlast = rtmp;
57791 +
57792 + if ((rtmp = (struct role_transition *)
57793 + acl_alloc(sizeof (struct role_transition))) == NULL)
57794 + return -ENOMEM;
57795 +
57796 + if (copy_from_user(rtmp, rusertp,
57797 + sizeof (struct role_transition)))
57798 + return -EFAULT;
57799 +
57800 + rusertp = rtmp->prev;
57801 +
57802 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57803 +
57804 + if (!len || len >= GR_SPROLE_LEN)
57805 + return -EINVAL;
57806 +
57807 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57808 + return -ENOMEM;
57809 +
57810 + if (copy_from_user(tmp, rtmp->rolename, len))
57811 + return -EFAULT;
57812 + tmp[len-1] = '\0';
57813 + rtmp->rolename = tmp;
57814 +
57815 + if (!rlast) {
57816 + rtmp->prev = NULL;
57817 + rolep->transitions = rtmp;
57818 + } else {
57819 + rlast->next = rtmp;
57820 + rtmp->prev = rlast;
57821 + }
57822 +
57823 + if (!rusertp)
57824 + rtmp->next = NULL;
57825 + }
57826 +
57827 + return 0;
57828 +}
57829 +
57830 +static struct acl_subject_label *
57831 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57832 +{
57833 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57834 + unsigned int len;
57835 + char *tmp;
57836 + __u32 num_objs;
57837 + struct acl_ip_label **i_tmp, *i_utmp2;
57838 + struct gr_hash_struct ghash;
57839 + struct subject_map *subjmap;
57840 + unsigned int i_num;
57841 + int err;
57842 +
57843 + s_tmp = lookup_subject_map(userp);
57844 +
57845 + /* we've already copied this subject into the kernel, just return
57846 + the reference to it, and don't copy it over again
57847 + */
57848 + if (s_tmp)
57849 + return(s_tmp);
57850 +
57851 + if ((s_tmp = (struct acl_subject_label *)
57852 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57853 + return ERR_PTR(-ENOMEM);
57854 +
57855 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57856 + if (subjmap == NULL)
57857 + return ERR_PTR(-ENOMEM);
57858 +
57859 + subjmap->user = userp;
57860 + subjmap->kernel = s_tmp;
57861 + insert_subj_map_entry(subjmap);
57862 +
57863 + if (copy_from_user(s_tmp, userp,
57864 + sizeof (struct acl_subject_label)))
57865 + return ERR_PTR(-EFAULT);
57866 +
57867 + len = strnlen_user(s_tmp->filename, PATH_MAX);
57868 +
57869 + if (!len || len >= PATH_MAX)
57870 + return ERR_PTR(-EINVAL);
57871 +
57872 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57873 + return ERR_PTR(-ENOMEM);
57874 +
57875 + if (copy_from_user(tmp, s_tmp->filename, len))
57876 + return ERR_PTR(-EFAULT);
57877 + tmp[len-1] = '\0';
57878 + s_tmp->filename = tmp;
57879 +
57880 + if (!strcmp(s_tmp->filename, "/"))
57881 + role->root_label = s_tmp;
57882 +
57883 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57884 + return ERR_PTR(-EFAULT);
57885 +
57886 + /* copy user and group transition tables */
57887 +
57888 + if (s_tmp->user_trans_num) {
57889 + uid_t *uidlist;
57890 +
57891 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57892 + if (uidlist == NULL)
57893 + return ERR_PTR(-ENOMEM);
57894 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57895 + return ERR_PTR(-EFAULT);
57896 +
57897 + s_tmp->user_transitions = uidlist;
57898 + }
57899 +
57900 + if (s_tmp->group_trans_num) {
57901 + gid_t *gidlist;
57902 +
57903 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57904 + if (gidlist == NULL)
57905 + return ERR_PTR(-ENOMEM);
57906 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57907 + return ERR_PTR(-EFAULT);
57908 +
57909 + s_tmp->group_transitions = gidlist;
57910 + }
57911 +
57912 + /* set up object hash table */
57913 + num_objs = count_user_objs(ghash.first);
57914 +
57915 + s_tmp->obj_hash_size = num_objs;
57916 + s_tmp->obj_hash =
57917 + (struct acl_object_label **)
57918 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57919 +
57920 + if (!s_tmp->obj_hash)
57921 + return ERR_PTR(-ENOMEM);
57922 +
57923 + memset(s_tmp->obj_hash, 0,
57924 + s_tmp->obj_hash_size *
57925 + sizeof (struct acl_object_label *));
57926 +
57927 + /* add in objects */
57928 + err = copy_user_objs(ghash.first, s_tmp, role);
57929 +
57930 + if (err)
57931 + return ERR_PTR(err);
57932 +
57933 + /* set pointer for parent subject */
57934 + if (s_tmp->parent_subject) {
57935 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57936 +
57937 + if (IS_ERR(s_tmp2))
57938 + return s_tmp2;
57939 +
57940 + s_tmp->parent_subject = s_tmp2;
57941 + }
57942 +
57943 + /* add in ip acls */
57944 +
57945 + if (!s_tmp->ip_num) {
57946 + s_tmp->ips = NULL;
57947 + goto insert;
57948 + }
57949 +
57950 + i_tmp =
57951 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57952 + sizeof (struct acl_ip_label *));
57953 +
57954 + if (!i_tmp)
57955 + return ERR_PTR(-ENOMEM);
57956 +
57957 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57958 + *(i_tmp + i_num) =
57959 + (struct acl_ip_label *)
57960 + acl_alloc(sizeof (struct acl_ip_label));
57961 + if (!*(i_tmp + i_num))
57962 + return ERR_PTR(-ENOMEM);
57963 +
57964 + if (copy_from_user
57965 + (&i_utmp2, s_tmp->ips + i_num,
57966 + sizeof (struct acl_ip_label *)))
57967 + return ERR_PTR(-EFAULT);
57968 +
57969 + if (copy_from_user
57970 + (*(i_tmp + i_num), i_utmp2,
57971 + sizeof (struct acl_ip_label)))
57972 + return ERR_PTR(-EFAULT);
57973 +
57974 + if ((*(i_tmp + i_num))->iface == NULL)
57975 + continue;
57976 +
57977 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57978 + if (!len || len >= IFNAMSIZ)
57979 + return ERR_PTR(-EINVAL);
57980 + tmp = acl_alloc(len);
57981 + if (tmp == NULL)
57982 + return ERR_PTR(-ENOMEM);
57983 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57984 + return ERR_PTR(-EFAULT);
57985 + (*(i_tmp + i_num))->iface = tmp;
57986 + }
57987 +
57988 + s_tmp->ips = i_tmp;
57989 +
57990 +insert:
57991 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57992 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57993 + return ERR_PTR(-ENOMEM);
57994 +
57995 + return s_tmp;
57996 +}
57997 +
57998 +static int
57999 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58000 +{
58001 + struct acl_subject_label s_pre;
58002 + struct acl_subject_label * ret;
58003 + int err;
58004 +
58005 + while (userp) {
58006 + if (copy_from_user(&s_pre, userp,
58007 + sizeof (struct acl_subject_label)))
58008 + return -EFAULT;
58009 +
58010 + /* do not add nested subjects here, add
58011 + while parsing objects
58012 + */
58013 +
58014 + if (s_pre.mode & GR_NESTED) {
58015 + userp = s_pre.prev;
58016 + continue;
58017 + }
58018 +
58019 + ret = do_copy_user_subj(userp, role);
58020 +
58021 + err = PTR_ERR(ret);
58022 + if (IS_ERR(ret))
58023 + return err;
58024 +
58025 + insert_acl_subj_label(ret, role);
58026 +
58027 + userp = s_pre.prev;
58028 + }
58029 +
58030 + return 0;
58031 +}
58032 +
58033 +static int
58034 +copy_user_acl(struct gr_arg *arg)
58035 +{
58036 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58037 + struct sprole_pw *sptmp;
58038 + struct gr_hash_struct *ghash;
58039 + uid_t *domainlist;
58040 + unsigned int r_num;
58041 + unsigned int len;
58042 + char *tmp;
58043 + int err = 0;
58044 + __u16 i;
58045 + __u32 num_subjs;
58046 +
58047 + /* we need a default and kernel role */
58048 + if (arg->role_db.num_roles < 2)
58049 + return -EINVAL;
58050 +
58051 + /* copy special role authentication info from userspace */
58052 +
58053 + num_sprole_pws = arg->num_sprole_pws;
58054 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58055 +
58056 + if (!acl_special_roles) {
58057 + err = -ENOMEM;
58058 + goto cleanup;
58059 + }
58060 +
58061 + for (i = 0; i < num_sprole_pws; i++) {
58062 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58063 + if (!sptmp) {
58064 + err = -ENOMEM;
58065 + goto cleanup;
58066 + }
58067 + if (copy_from_user(sptmp, arg->sprole_pws + i,
58068 + sizeof (struct sprole_pw))) {
58069 + err = -EFAULT;
58070 + goto cleanup;
58071 + }
58072 +
58073 + len =
58074 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58075 +
58076 + if (!len || len >= GR_SPROLE_LEN) {
58077 + err = -EINVAL;
58078 + goto cleanup;
58079 + }
58080 +
58081 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
58082 + err = -ENOMEM;
58083 + goto cleanup;
58084 + }
58085 +
58086 + if (copy_from_user(tmp, sptmp->rolename, len)) {
58087 + err = -EFAULT;
58088 + goto cleanup;
58089 + }
58090 + tmp[len-1] = '\0';
58091 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58092 + printk(KERN_ALERT "Copying special role %s\n", tmp);
58093 +#endif
58094 + sptmp->rolename = tmp;
58095 + acl_special_roles[i] = sptmp;
58096 + }
58097 +
58098 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58099 +
58100 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58101 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
58102 +
58103 + if (!r_tmp) {
58104 + err = -ENOMEM;
58105 + goto cleanup;
58106 + }
58107 +
58108 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
58109 + sizeof (struct acl_role_label *))) {
58110 + err = -EFAULT;
58111 + goto cleanup;
58112 + }
58113 +
58114 + if (copy_from_user(r_tmp, r_utmp2,
58115 + sizeof (struct acl_role_label))) {
58116 + err = -EFAULT;
58117 + goto cleanup;
58118 + }
58119 +
58120 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58121 +
58122 + if (!len || len >= PATH_MAX) {
58123 + err = -EINVAL;
58124 + goto cleanup;
58125 + }
58126 +
58127 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
58128 + err = -ENOMEM;
58129 + goto cleanup;
58130 + }
58131 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
58132 + err = -EFAULT;
58133 + goto cleanup;
58134 + }
58135 + tmp[len-1] = '\0';
58136 + r_tmp->rolename = tmp;
58137 +
58138 + if (!strcmp(r_tmp->rolename, "default")
58139 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58140 + default_role = r_tmp;
58141 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58142 + kernel_role = r_tmp;
58143 + }
58144 +
58145 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
58146 + err = -ENOMEM;
58147 + goto cleanup;
58148 + }
58149 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
58150 + err = -EFAULT;
58151 + goto cleanup;
58152 + }
58153 +
58154 + r_tmp->hash = ghash;
58155 +
58156 + num_subjs = count_user_subjs(r_tmp->hash->first);
58157 +
58158 + r_tmp->subj_hash_size = num_subjs;
58159 + r_tmp->subj_hash =
58160 + (struct acl_subject_label **)
58161 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58162 +
58163 + if (!r_tmp->subj_hash) {
58164 + err = -ENOMEM;
58165 + goto cleanup;
58166 + }
58167 +
58168 + err = copy_user_allowedips(r_tmp);
58169 + if (err)
58170 + goto cleanup;
58171 +
58172 + /* copy domain info */
58173 + if (r_tmp->domain_children != NULL) {
58174 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58175 + if (domainlist == NULL) {
58176 + err = -ENOMEM;
58177 + goto cleanup;
58178 + }
58179 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
58180 + err = -EFAULT;
58181 + goto cleanup;
58182 + }
58183 + r_tmp->domain_children = domainlist;
58184 + }
58185 +
58186 + err = copy_user_transitions(r_tmp);
58187 + if (err)
58188 + goto cleanup;
58189 +
58190 + memset(r_tmp->subj_hash, 0,
58191 + r_tmp->subj_hash_size *
58192 + sizeof (struct acl_subject_label *));
58193 +
58194 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
58195 +
58196 + if (err)
58197 + goto cleanup;
58198 +
58199 + /* set nested subject list to null */
58200 + r_tmp->hash->first = NULL;
58201 +
58202 + insert_acl_role_label(r_tmp);
58203 + }
58204 +
58205 + goto return_err;
58206 + cleanup:
58207 + free_variables();
58208 + return_err:
58209 + return err;
58210 +
58211 +}
58212 +
58213 +static int
58214 +gracl_init(struct gr_arg *args)
58215 +{
58216 + int error = 0;
58217 +
58218 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58219 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58220 +
58221 + if (init_variables(args)) {
58222 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58223 + error = -ENOMEM;
58224 + free_variables();
58225 + goto out;
58226 + }
58227 +
58228 + error = copy_user_acl(args);
58229 + free_init_variables();
58230 + if (error) {
58231 + free_variables();
58232 + goto out;
58233 + }
58234 +
58235 + if ((error = gr_set_acls(0))) {
58236 + free_variables();
58237 + goto out;
58238 + }
58239 +
58240 + pax_open_kernel();
58241 + gr_status |= GR_READY;
58242 + pax_close_kernel();
58243 +
58244 + out:
58245 + return error;
58246 +}
58247 +
58248 +/* derived from glibc fnmatch() 0: match, 1: no match*/
58249 +
58250 +static int
58251 +glob_match(const char *p, const char *n)
58252 +{
58253 + char c;
58254 +
58255 + while ((c = *p++) != '\0') {
58256 + switch (c) {
58257 + case '?':
58258 + if (*n == '\0')
58259 + return 1;
58260 + else if (*n == '/')
58261 + return 1;
58262 + break;
58263 + case '\\':
58264 + if (*n != c)
58265 + return 1;
58266 + break;
58267 + case '*':
58268 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
58269 + if (*n == '/')
58270 + return 1;
58271 + else if (c == '?') {
58272 + if (*n == '\0')
58273 + return 1;
58274 + else
58275 + ++n;
58276 + }
58277 + }
58278 + if (c == '\0') {
58279 + return 0;
58280 + } else {
58281 + const char *endp;
58282 +
58283 + if ((endp = strchr(n, '/')) == NULL)
58284 + endp = n + strlen(n);
58285 +
58286 + if (c == '[') {
58287 + for (--p; n < endp; ++n)
58288 + if (!glob_match(p, n))
58289 + return 0;
58290 + } else if (c == '/') {
58291 + while (*n != '\0' && *n != '/')
58292 + ++n;
58293 + if (*n == '/' && !glob_match(p, n + 1))
58294 + return 0;
58295 + } else {
58296 + for (--p; n < endp; ++n)
58297 + if (*n == c && !glob_match(p, n))
58298 + return 0;
58299 + }
58300 +
58301 + return 1;
58302 + }
58303 + case '[':
58304 + {
58305 + int not;
58306 + char cold;
58307 +
58308 + if (*n == '\0' || *n == '/')
58309 + return 1;
58310 +
58311 + not = (*p == '!' || *p == '^');
58312 + if (not)
58313 + ++p;
58314 +
58315 + c = *p++;
58316 + for (;;) {
58317 + unsigned char fn = (unsigned char)*n;
58318 +
58319 + if (c == '\0')
58320 + return 1;
58321 + else {
58322 + if (c == fn)
58323 + goto matched;
58324 + cold = c;
58325 + c = *p++;
58326 +
58327 + if (c == '-' && *p != ']') {
58328 + unsigned char cend = *p++;
58329 +
58330 + if (cend == '\0')
58331 + return 1;
58332 +
58333 + if (cold <= fn && fn <= cend)
58334 + goto matched;
58335 +
58336 + c = *p++;
58337 + }
58338 + }
58339 +
58340 + if (c == ']')
58341 + break;
58342 + }
58343 + if (!not)
58344 + return 1;
58345 + break;
58346 + matched:
58347 + while (c != ']') {
58348 + if (c == '\0')
58349 + return 1;
58350 +
58351 + c = *p++;
58352 + }
58353 + if (not)
58354 + return 1;
58355 + }
58356 + break;
58357 + default:
58358 + if (c != *n)
58359 + return 1;
58360 + }
58361 +
58362 + ++n;
58363 + }
58364 +
58365 + if (*n == '\0')
58366 + return 0;
58367 +
58368 + if (*n == '/')
58369 + return 0;
58370 +
58371 + return 1;
58372 +}
58373 +
58374 +static struct acl_object_label *
58375 +chk_glob_label(struct acl_object_label *globbed,
58376 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58377 +{
58378 + struct acl_object_label *tmp;
58379 +
58380 + if (*path == NULL)
58381 + *path = gr_to_filename_nolock(dentry, mnt);
58382 +
58383 + tmp = globbed;
58384 +
58385 + while (tmp) {
58386 + if (!glob_match(tmp->filename, *path))
58387 + return tmp;
58388 + tmp = tmp->next;
58389 + }
58390 +
58391 + return NULL;
58392 +}
58393 +
58394 +static struct acl_object_label *
58395 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58396 + const ino_t curr_ino, const dev_t curr_dev,
58397 + const struct acl_subject_label *subj, char **path, const int checkglob)
58398 +{
58399 + struct acl_subject_label *tmpsubj;
58400 + struct acl_object_label *retval;
58401 + struct acl_object_label *retval2;
58402 +
58403 + tmpsubj = (struct acl_subject_label *) subj;
58404 + read_lock(&gr_inode_lock);
58405 + do {
58406 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58407 + if (retval) {
58408 + if (checkglob && retval->globbed) {
58409 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58410 + if (retval2)
58411 + retval = retval2;
58412 + }
58413 + break;
58414 + }
58415 + } while ((tmpsubj = tmpsubj->parent_subject));
58416 + read_unlock(&gr_inode_lock);
58417 +
58418 + return retval;
58419 +}
58420 +
58421 +static __inline__ struct acl_object_label *
58422 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58423 + const struct dentry *curr_dentry,
58424 + const struct acl_subject_label *subj, char **path, const int checkglob)
58425 +{
58426 + int newglob = checkglob;
58427 +
58428 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58429 + as we don't want a / * rule to match instead of the / object
58430 + don't do this for create lookups that call this function though, since they're looking up
58431 + on the parent and thus need globbing checks on all paths
58432 + */
58433 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58434 + newglob = GR_NO_GLOB;
58435 +
58436 + return __full_lookup(orig_dentry, orig_mnt,
58437 + curr_dentry->d_inode->i_ino,
58438 + __get_dev(curr_dentry), subj, path, newglob);
58439 +}
58440 +
58441 +static struct acl_object_label *
58442 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58443 + const struct acl_subject_label *subj, char *path, const int checkglob)
58444 +{
58445 + struct dentry *dentry = (struct dentry *) l_dentry;
58446 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58447 + struct acl_object_label *retval;
58448 +
58449 + spin_lock(&dcache_lock);
58450 + spin_lock(&vfsmount_lock);
58451 +
58452 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58453 +#ifdef CONFIG_NET
58454 + mnt == sock_mnt ||
58455 +#endif
58456 +#ifdef CONFIG_HUGETLBFS
58457 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58458 +#endif
58459 + /* ignore Eric Biederman */
58460 + IS_PRIVATE(l_dentry->d_inode))) {
58461 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58462 + goto out;
58463 + }
58464 +
58465 + for (;;) {
58466 + if (dentry == real_root && mnt == real_root_mnt)
58467 + break;
58468 +
58469 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58470 + if (mnt->mnt_parent == mnt)
58471 + break;
58472 +
58473 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58474 + if (retval != NULL)
58475 + goto out;
58476 +
58477 + dentry = mnt->mnt_mountpoint;
58478 + mnt = mnt->mnt_parent;
58479 + continue;
58480 + }
58481 +
58482 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58483 + if (retval != NULL)
58484 + goto out;
58485 +
58486 + dentry = dentry->d_parent;
58487 + }
58488 +
58489 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58490 +
58491 + if (retval == NULL)
58492 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58493 +out:
58494 + spin_unlock(&vfsmount_lock);
58495 + spin_unlock(&dcache_lock);
58496 +
58497 + BUG_ON(retval == NULL);
58498 +
58499 + return retval;
58500 +}
58501 +
58502 +static __inline__ struct acl_object_label *
58503 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58504 + const struct acl_subject_label *subj)
58505 +{
58506 + char *path = NULL;
58507 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58508 +}
58509 +
58510 +static __inline__ struct acl_object_label *
58511 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58512 + const struct acl_subject_label *subj)
58513 +{
58514 + char *path = NULL;
58515 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58516 +}
58517 +
58518 +static __inline__ struct acl_object_label *
58519 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58520 + const struct acl_subject_label *subj, char *path)
58521 +{
58522 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58523 +}
58524 +
58525 +static struct acl_subject_label *
58526 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58527 + const struct acl_role_label *role)
58528 +{
58529 + struct dentry *dentry = (struct dentry *) l_dentry;
58530 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58531 + struct acl_subject_label *retval;
58532 +
58533 + spin_lock(&dcache_lock);
58534 + spin_lock(&vfsmount_lock);
58535 +
58536 + for (;;) {
58537 + if (dentry == real_root && mnt == real_root_mnt)
58538 + break;
58539 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58540 + if (mnt->mnt_parent == mnt)
58541 + break;
58542 +
58543 + read_lock(&gr_inode_lock);
58544 + retval =
58545 + lookup_acl_subj_label(dentry->d_inode->i_ino,
58546 + __get_dev(dentry), role);
58547 + read_unlock(&gr_inode_lock);
58548 + if (retval != NULL)
58549 + goto out;
58550 +
58551 + dentry = mnt->mnt_mountpoint;
58552 + mnt = mnt->mnt_parent;
58553 + continue;
58554 + }
58555 +
58556 + read_lock(&gr_inode_lock);
58557 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58558 + __get_dev(dentry), role);
58559 + read_unlock(&gr_inode_lock);
58560 + if (retval != NULL)
58561 + goto out;
58562 +
58563 + dentry = dentry->d_parent;
58564 + }
58565 +
58566 + read_lock(&gr_inode_lock);
58567 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58568 + __get_dev(dentry), role);
58569 + read_unlock(&gr_inode_lock);
58570 +
58571 + if (unlikely(retval == NULL)) {
58572 + read_lock(&gr_inode_lock);
58573 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58574 + __get_dev(real_root), role);
58575 + read_unlock(&gr_inode_lock);
58576 + }
58577 +out:
58578 + spin_unlock(&vfsmount_lock);
58579 + spin_unlock(&dcache_lock);
58580 +
58581 + BUG_ON(retval == NULL);
58582 +
58583 + return retval;
58584 +}
58585 +
58586 +static void
58587 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58588 +{
58589 + struct task_struct *task = current;
58590 + const struct cred *cred = current_cred();
58591 +
58592 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58593 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58594 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58595 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58596 +
58597 + return;
58598 +}
58599 +
58600 +static void
58601 +gr_log_learn_sysctl(const char *path, const __u32 mode)
58602 +{
58603 + struct task_struct *task = current;
58604 + const struct cred *cred = current_cred();
58605 +
58606 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58607 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58608 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58609 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58610 +
58611 + return;
58612 +}
58613 +
58614 +static void
58615 +gr_log_learn_id_change(const char type, const unsigned int real,
58616 + const unsigned int effective, const unsigned int fs)
58617 +{
58618 + struct task_struct *task = current;
58619 + const struct cred *cred = current_cred();
58620 +
58621 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58622 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58623 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58624 + type, real, effective, fs, &task->signal->saved_ip);
58625 +
58626 + return;
58627 +}
58628 +
58629 +__u32
58630 +gr_search_file(const struct dentry * dentry, const __u32 mode,
58631 + const struct vfsmount * mnt)
58632 +{
58633 + __u32 retval = mode;
58634 + struct acl_subject_label *curracl;
58635 + struct acl_object_label *currobj;
58636 +
58637 + if (unlikely(!(gr_status & GR_READY)))
58638 + return (mode & ~GR_AUDITS);
58639 +
58640 + curracl = current->acl;
58641 +
58642 + currobj = chk_obj_label(dentry, mnt, curracl);
58643 + retval = currobj->mode & mode;
58644 +
58645 + /* if we're opening a specified transfer file for writing
58646 + (e.g. /dev/initctl), then transfer our role to init
58647 + */
58648 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58649 + current->role->roletype & GR_ROLE_PERSIST)) {
58650 + struct task_struct *task = init_pid_ns.child_reaper;
58651 +
58652 + if (task->role != current->role) {
58653 + task->acl_sp_role = 0;
58654 + task->acl_role_id = current->acl_role_id;
58655 + task->role = current->role;
58656 + rcu_read_lock();
58657 + read_lock(&grsec_exec_file_lock);
58658 + gr_apply_subject_to_task(task);
58659 + read_unlock(&grsec_exec_file_lock);
58660 + rcu_read_unlock();
58661 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58662 + }
58663 + }
58664 +
58665 + if (unlikely
58666 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58667 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58668 + __u32 new_mode = mode;
58669 +
58670 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58671 +
58672 + retval = new_mode;
58673 +
58674 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58675 + new_mode |= GR_INHERIT;
58676 +
58677 + if (!(mode & GR_NOLEARN))
58678 + gr_log_learn(dentry, mnt, new_mode);
58679 + }
58680 +
58681 + return retval;
58682 +}
58683 +
58684 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58685 + const struct dentry *parent,
58686 + const struct vfsmount *mnt)
58687 +{
58688 + struct name_entry *match;
58689 + struct acl_object_label *matchpo;
58690 + struct acl_subject_label *curracl;
58691 + char *path;
58692 +
58693 + if (unlikely(!(gr_status & GR_READY)))
58694 + return NULL;
58695 +
58696 + preempt_disable();
58697 + path = gr_to_filename_rbac(new_dentry, mnt);
58698 + match = lookup_name_entry_create(path);
58699 +
58700 + curracl = current->acl;
58701 +
58702 + if (match) {
58703 + read_lock(&gr_inode_lock);
58704 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58705 + read_unlock(&gr_inode_lock);
58706 +
58707 + if (matchpo) {
58708 + preempt_enable();
58709 + return matchpo;
58710 + }
58711 + }
58712 +
58713 + // lookup parent
58714 +
58715 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58716 +
58717 + preempt_enable();
58718 + return matchpo;
58719 +}
58720 +
58721 +__u32
58722 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58723 + const struct vfsmount * mnt, const __u32 mode)
58724 +{
58725 + struct acl_object_label *matchpo;
58726 + __u32 retval;
58727 +
58728 + if (unlikely(!(gr_status & GR_READY)))
58729 + return (mode & ~GR_AUDITS);
58730 +
58731 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
58732 +
58733 + retval = matchpo->mode & mode;
58734 +
58735 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58736 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58737 + __u32 new_mode = mode;
58738 +
58739 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58740 +
58741 + gr_log_learn(new_dentry, mnt, new_mode);
58742 + return new_mode;
58743 + }
58744 +
58745 + return retval;
58746 +}
58747 +
58748 +__u32
58749 +gr_check_link(const struct dentry * new_dentry,
58750 + const struct dentry * parent_dentry,
58751 + const struct vfsmount * parent_mnt,
58752 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58753 +{
58754 + struct acl_object_label *obj;
58755 + __u32 oldmode, newmode;
58756 + __u32 needmode;
58757 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58758 + GR_DELETE | GR_INHERIT;
58759 +
58760 + if (unlikely(!(gr_status & GR_READY)))
58761 + return (GR_CREATE | GR_LINK);
58762 +
58763 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58764 + oldmode = obj->mode;
58765 +
58766 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58767 + newmode = obj->mode;
58768 +
58769 + needmode = newmode & checkmodes;
58770 +
58771 + // old name for hardlink must have at least the permissions of the new name
58772 + if ((oldmode & needmode) != needmode)
58773 + goto bad;
58774 +
58775 + // if old name had restrictions/auditing, make sure the new name does as well
58776 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58777 +
58778 + // don't allow hardlinking of suid/sgid files without permission
58779 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58780 + needmode |= GR_SETID;
58781 +
58782 + if ((newmode & needmode) != needmode)
58783 + goto bad;
58784 +
58785 + // enforce minimum permissions
58786 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58787 + return newmode;
58788 +bad:
58789 + needmode = oldmode;
58790 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58791 + needmode |= GR_SETID;
58792 +
58793 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58794 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58795 + return (GR_CREATE | GR_LINK);
58796 + } else if (newmode & GR_SUPPRESS)
58797 + return GR_SUPPRESS;
58798 + else
58799 + return 0;
58800 +}
58801 +
58802 +int
58803 +gr_check_hidden_task(const struct task_struct *task)
58804 +{
58805 + if (unlikely(!(gr_status & GR_READY)))
58806 + return 0;
58807 +
58808 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58809 + return 1;
58810 +
58811 + return 0;
58812 +}
58813 +
58814 +int
58815 +gr_check_protected_task(const struct task_struct *task)
58816 +{
58817 + if (unlikely(!(gr_status & GR_READY) || !task))
58818 + return 0;
58819 +
58820 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58821 + task->acl != current->acl)
58822 + return 1;
58823 +
58824 + return 0;
58825 +}
58826 +
58827 +int
58828 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58829 +{
58830 + struct task_struct *p;
58831 + int ret = 0;
58832 +
58833 + if (unlikely(!(gr_status & GR_READY) || !pid))
58834 + return ret;
58835 +
58836 + read_lock(&tasklist_lock);
58837 + do_each_pid_task(pid, type, p) {
58838 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58839 + p->acl != current->acl) {
58840 + ret = 1;
58841 + goto out;
58842 + }
58843 + } while_each_pid_task(pid, type, p);
58844 +out:
58845 + read_unlock(&tasklist_lock);
58846 +
58847 + return ret;
58848 +}
58849 +
58850 +void
58851 +gr_copy_label(struct task_struct *tsk)
58852 +{
58853 + /* plain copying of fields is already done by dup_task_struct */
58854 + tsk->signal->used_accept = 0;
58855 + tsk->acl_sp_role = 0;
58856 + //tsk->acl_role_id = current->acl_role_id;
58857 + //tsk->acl = current->acl;
58858 + //tsk->role = current->role;
58859 + tsk->signal->curr_ip = current->signal->curr_ip;
58860 + tsk->signal->saved_ip = current->signal->saved_ip;
58861 + if (current->exec_file)
58862 + get_file(current->exec_file);
58863 + //tsk->exec_file = current->exec_file;
58864 + //tsk->is_writable = current->is_writable;
58865 + if (unlikely(current->signal->used_accept)) {
58866 + current->signal->curr_ip = 0;
58867 + current->signal->saved_ip = 0;
58868 + }
58869 +
58870 + return;
58871 +}
58872 +
58873 +static void
58874 +gr_set_proc_res(struct task_struct *task)
58875 +{
58876 + struct acl_subject_label *proc;
58877 + unsigned short i;
58878 +
58879 + proc = task->acl;
58880 +
58881 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58882 + return;
58883 +
58884 + for (i = 0; i < RLIM_NLIMITS; i++) {
58885 + if (!(proc->resmask & (1 << i)))
58886 + continue;
58887 +
58888 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58889 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58890 + }
58891 +
58892 + return;
58893 +}
58894 +
58895 +extern int __gr_process_user_ban(struct user_struct *user);
58896 +
58897 +int
58898 +gr_check_user_change(int real, int effective, int fs)
58899 +{
58900 + unsigned int i;
58901 + __u16 num;
58902 + uid_t *uidlist;
58903 + int curuid;
58904 + int realok = 0;
58905 + int effectiveok = 0;
58906 + int fsok = 0;
58907 +
58908 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58909 + struct user_struct *user;
58910 +
58911 + if (real == -1)
58912 + goto skipit;
58913 +
58914 + user = find_user(real);
58915 + if (user == NULL)
58916 + goto skipit;
58917 +
58918 + if (__gr_process_user_ban(user)) {
58919 + /* for find_user */
58920 + free_uid(user);
58921 + return 1;
58922 + }
58923 +
58924 + /* for find_user */
58925 + free_uid(user);
58926 +
58927 +skipit:
58928 +#endif
58929 +
58930 + if (unlikely(!(gr_status & GR_READY)))
58931 + return 0;
58932 +
58933 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58934 + gr_log_learn_id_change('u', real, effective, fs);
58935 +
58936 + num = current->acl->user_trans_num;
58937 + uidlist = current->acl->user_transitions;
58938 +
58939 + if (uidlist == NULL)
58940 + return 0;
58941 +
58942 + if (real == -1)
58943 + realok = 1;
58944 + if (effective == -1)
58945 + effectiveok = 1;
58946 + if (fs == -1)
58947 + fsok = 1;
58948 +
58949 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
58950 + for (i = 0; i < num; i++) {
58951 + curuid = (int)uidlist[i];
58952 + if (real == curuid)
58953 + realok = 1;
58954 + if (effective == curuid)
58955 + effectiveok = 1;
58956 + if (fs == curuid)
58957 + fsok = 1;
58958 + }
58959 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
58960 + for (i = 0; i < num; i++) {
58961 + curuid = (int)uidlist[i];
58962 + if (real == curuid)
58963 + break;
58964 + if (effective == curuid)
58965 + break;
58966 + if (fs == curuid)
58967 + break;
58968 + }
58969 + /* not in deny list */
58970 + if (i == num) {
58971 + realok = 1;
58972 + effectiveok = 1;
58973 + fsok = 1;
58974 + }
58975 + }
58976 +
58977 + if (realok && effectiveok && fsok)
58978 + return 0;
58979 + else {
58980 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58981 + return 1;
58982 + }
58983 +}
58984 +
58985 +int
58986 +gr_check_group_change(int real, int effective, int fs)
58987 +{
58988 + unsigned int i;
58989 + __u16 num;
58990 + gid_t *gidlist;
58991 + int curgid;
58992 + int realok = 0;
58993 + int effectiveok = 0;
58994 + int fsok = 0;
58995 +
58996 + if (unlikely(!(gr_status & GR_READY)))
58997 + return 0;
58998 +
58999 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59000 + gr_log_learn_id_change('g', real, effective, fs);
59001 +
59002 + num = current->acl->group_trans_num;
59003 + gidlist = current->acl->group_transitions;
59004 +
59005 + if (gidlist == NULL)
59006 + return 0;
59007 +
59008 + if (real == -1)
59009 + realok = 1;
59010 + if (effective == -1)
59011 + effectiveok = 1;
59012 + if (fs == -1)
59013 + fsok = 1;
59014 +
59015 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
59016 + for (i = 0; i < num; i++) {
59017 + curgid = (int)gidlist[i];
59018 + if (real == curgid)
59019 + realok = 1;
59020 + if (effective == curgid)
59021 + effectiveok = 1;
59022 + if (fs == curgid)
59023 + fsok = 1;
59024 + }
59025 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
59026 + for (i = 0; i < num; i++) {
59027 + curgid = (int)gidlist[i];
59028 + if (real == curgid)
59029 + break;
59030 + if (effective == curgid)
59031 + break;
59032 + if (fs == curgid)
59033 + break;
59034 + }
59035 + /* not in deny list */
59036 + if (i == num) {
59037 + realok = 1;
59038 + effectiveok = 1;
59039 + fsok = 1;
59040 + }
59041 + }
59042 +
59043 + if (realok && effectiveok && fsok)
59044 + return 0;
59045 + else {
59046 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
59047 + return 1;
59048 + }
59049 +}
59050 +
59051 +extern int gr_acl_is_capable(const int cap);
59052 +
59053 +void
59054 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
59055 +{
59056 + struct acl_role_label *role = task->role;
59057 + struct acl_subject_label *subj = NULL;
59058 + struct acl_object_label *obj;
59059 + struct file *filp;
59060 +
59061 + if (unlikely(!(gr_status & GR_READY)))
59062 + return;
59063 +
59064 + filp = task->exec_file;
59065 +
59066 + /* kernel process, we'll give them the kernel role */
59067 + if (unlikely(!filp)) {
59068 + task->role = kernel_role;
59069 + task->acl = kernel_role->root_label;
59070 + return;
59071 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59072 + role = lookup_acl_role_label(task, uid, gid);
59073 +
59074 + /* don't change the role if we're not a privileged process */
59075 + if (role && task->role != role &&
59076 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59077 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59078 + return;
59079 +
59080 + /* perform subject lookup in possibly new role
59081 + we can use this result below in the case where role == task->role
59082 + */
59083 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59084 +
59085 + /* if we changed uid/gid, but result in the same role
59086 + and are using inheritance, don't lose the inherited subject
59087 + if current subject is other than what normal lookup
59088 + would result in, we arrived via inheritance, don't
59089 + lose subject
59090 + */
59091 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59092 + (subj == task->acl)))
59093 + task->acl = subj;
59094 +
59095 + task->role = role;
59096 +
59097 + task->is_writable = 0;
59098 +
59099 + /* ignore additional mmap checks for processes that are writable
59100 + by the default ACL */
59101 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59102 + if (unlikely(obj->mode & GR_WRITE))
59103 + task->is_writable = 1;
59104 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59105 + if (unlikely(obj->mode & GR_WRITE))
59106 + task->is_writable = 1;
59107 +
59108 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59109 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59110 +#endif
59111 +
59112 + gr_set_proc_res(task);
59113 +
59114 + return;
59115 +}
59116 +
59117 +int
59118 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59119 + const int unsafe_flags)
59120 +{
59121 + struct task_struct *task = current;
59122 + struct acl_subject_label *newacl;
59123 + struct acl_object_label *obj;
59124 + __u32 retmode;
59125 +
59126 + if (unlikely(!(gr_status & GR_READY)))
59127 + return 0;
59128 +
59129 + newacl = chk_subj_label(dentry, mnt, task->role);
59130 +
59131 + task_lock(task);
59132 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59133 + !(task->role->roletype & GR_ROLE_GOD) &&
59134 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59135 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59136 + task_unlock(task);
59137 + if (unsafe_flags & LSM_UNSAFE_SHARE)
59138 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59139 + else
59140 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59141 + return -EACCES;
59142 + }
59143 + task_unlock(task);
59144 +
59145 + obj = chk_obj_label(dentry, mnt, task->acl);
59146 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59147 +
59148 + if (!(task->acl->mode & GR_INHERITLEARN) &&
59149 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59150 + if (obj->nested)
59151 + task->acl = obj->nested;
59152 + else
59153 + task->acl = newacl;
59154 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59155 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59156 +
59157 + task->is_writable = 0;
59158 +
59159 + /* ignore additional mmap checks for processes that are writable
59160 + by the default ACL */
59161 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
59162 + if (unlikely(obj->mode & GR_WRITE))
59163 + task->is_writable = 1;
59164 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
59165 + if (unlikely(obj->mode & GR_WRITE))
59166 + task->is_writable = 1;
59167 +
59168 + gr_set_proc_res(task);
59169 +
59170 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59171 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59172 +#endif
59173 + return 0;
59174 +}
59175 +
59176 +/* always called with valid inodev ptr */
59177 +static void
59178 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59179 +{
59180 + struct acl_object_label *matchpo;
59181 + struct acl_subject_label *matchps;
59182 + struct acl_subject_label *subj;
59183 + struct acl_role_label *role;
59184 + unsigned int x;
59185 +
59186 + FOR_EACH_ROLE_START(role)
59187 + FOR_EACH_SUBJECT_START(role, subj, x)
59188 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59189 + matchpo->mode |= GR_DELETED;
59190 + FOR_EACH_SUBJECT_END(subj,x)
59191 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59192 + if (subj->inode == ino && subj->device == dev)
59193 + subj->mode |= GR_DELETED;
59194 + FOR_EACH_NESTED_SUBJECT_END(subj)
59195 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59196 + matchps->mode |= GR_DELETED;
59197 + FOR_EACH_ROLE_END(role)
59198 +
59199 + inodev->nentry->deleted = 1;
59200 +
59201 + return;
59202 +}
59203 +
59204 +void
59205 +gr_handle_delete(const ino_t ino, const dev_t dev)
59206 +{
59207 + struct inodev_entry *inodev;
59208 +
59209 + if (unlikely(!(gr_status & GR_READY)))
59210 + return;
59211 +
59212 + write_lock(&gr_inode_lock);
59213 + inodev = lookup_inodev_entry(ino, dev);
59214 + if (inodev != NULL)
59215 + do_handle_delete(inodev, ino, dev);
59216 + write_unlock(&gr_inode_lock);
59217 +
59218 + return;
59219 +}
59220 +
59221 +static void
59222 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59223 + const ino_t newinode, const dev_t newdevice,
59224 + struct acl_subject_label *subj)
59225 +{
59226 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59227 + struct acl_object_label *match;
59228 +
59229 + match = subj->obj_hash[index];
59230 +
59231 + while (match && (match->inode != oldinode ||
59232 + match->device != olddevice ||
59233 + !(match->mode & GR_DELETED)))
59234 + match = match->next;
59235 +
59236 + if (match && (match->inode == oldinode)
59237 + && (match->device == olddevice)
59238 + && (match->mode & GR_DELETED)) {
59239 + if (match->prev == NULL) {
59240 + subj->obj_hash[index] = match->next;
59241 + if (match->next != NULL)
59242 + match->next->prev = NULL;
59243 + } else {
59244 + match->prev->next = match->next;
59245 + if (match->next != NULL)
59246 + match->next->prev = match->prev;
59247 + }
59248 + match->prev = NULL;
59249 + match->next = NULL;
59250 + match->inode = newinode;
59251 + match->device = newdevice;
59252 + match->mode &= ~GR_DELETED;
59253 +
59254 + insert_acl_obj_label(match, subj);
59255 + }
59256 +
59257 + return;
59258 +}
59259 +
59260 +static void
59261 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59262 + const ino_t newinode, const dev_t newdevice,
59263 + struct acl_role_label *role)
59264 +{
59265 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59266 + struct acl_subject_label *match;
59267 +
59268 + match = role->subj_hash[index];
59269 +
59270 + while (match && (match->inode != oldinode ||
59271 + match->device != olddevice ||
59272 + !(match->mode & GR_DELETED)))
59273 + match = match->next;
59274 +
59275 + if (match && (match->inode == oldinode)
59276 + && (match->device == olddevice)
59277 + && (match->mode & GR_DELETED)) {
59278 + if (match->prev == NULL) {
59279 + role->subj_hash[index] = match->next;
59280 + if (match->next != NULL)
59281 + match->next->prev = NULL;
59282 + } else {
59283 + match->prev->next = match->next;
59284 + if (match->next != NULL)
59285 + match->next->prev = match->prev;
59286 + }
59287 + match->prev = NULL;
59288 + match->next = NULL;
59289 + match->inode = newinode;
59290 + match->device = newdevice;
59291 + match->mode &= ~GR_DELETED;
59292 +
59293 + insert_acl_subj_label(match, role);
59294 + }
59295 +
59296 + return;
59297 +}
59298 +
59299 +static void
59300 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59301 + const ino_t newinode, const dev_t newdevice)
59302 +{
59303 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59304 + struct inodev_entry *match;
59305 +
59306 + match = inodev_set.i_hash[index];
59307 +
59308 + while (match && (match->nentry->inode != oldinode ||
59309 + match->nentry->device != olddevice || !match->nentry->deleted))
59310 + match = match->next;
59311 +
59312 + if (match && (match->nentry->inode == oldinode)
59313 + && (match->nentry->device == olddevice) &&
59314 + match->nentry->deleted) {
59315 + if (match->prev == NULL) {
59316 + inodev_set.i_hash[index] = match->next;
59317 + if (match->next != NULL)
59318 + match->next->prev = NULL;
59319 + } else {
59320 + match->prev->next = match->next;
59321 + if (match->next != NULL)
59322 + match->next->prev = match->prev;
59323 + }
59324 + match->prev = NULL;
59325 + match->next = NULL;
59326 + match->nentry->inode = newinode;
59327 + match->nentry->device = newdevice;
59328 + match->nentry->deleted = 0;
59329 +
59330 + insert_inodev_entry(match);
59331 + }
59332 +
59333 + return;
59334 +}
59335 +
59336 +static void
59337 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59338 +{
59339 + struct acl_subject_label *subj;
59340 + struct acl_role_label *role;
59341 + unsigned int x;
59342 +
59343 + FOR_EACH_ROLE_START(role)
59344 + update_acl_subj_label(matchn->inode, matchn->device,
59345 + inode, dev, role);
59346 +
59347 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59348 + if ((subj->inode == inode) && (subj->device == dev)) {
59349 + subj->inode = inode;
59350 + subj->device = dev;
59351 + }
59352 + FOR_EACH_NESTED_SUBJECT_END(subj)
59353 + FOR_EACH_SUBJECT_START(role, subj, x)
59354 + update_acl_obj_label(matchn->inode, matchn->device,
59355 + inode, dev, subj);
59356 + FOR_EACH_SUBJECT_END(subj,x)
59357 + FOR_EACH_ROLE_END(role)
59358 +
59359 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59360 +
59361 + return;
59362 +}
59363 +
59364 +static void
59365 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59366 + const struct vfsmount *mnt)
59367 +{
59368 + ino_t ino = dentry->d_inode->i_ino;
59369 + dev_t dev = __get_dev(dentry);
59370 +
59371 + __do_handle_create(matchn, ino, dev);
59372 +
59373 + return;
59374 +}
59375 +
59376 +void
59377 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59378 +{
59379 + struct name_entry *matchn;
59380 +
59381 + if (unlikely(!(gr_status & GR_READY)))
59382 + return;
59383 +
59384 + preempt_disable();
59385 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59386 +
59387 + if (unlikely((unsigned long)matchn)) {
59388 + write_lock(&gr_inode_lock);
59389 + do_handle_create(matchn, dentry, mnt);
59390 + write_unlock(&gr_inode_lock);
59391 + }
59392 + preempt_enable();
59393 +
59394 + return;
59395 +}
59396 +
59397 +void
59398 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59399 +{
59400 + struct name_entry *matchn;
59401 +
59402 + if (unlikely(!(gr_status & GR_READY)))
59403 + return;
59404 +
59405 + preempt_disable();
59406 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59407 +
59408 + if (unlikely((unsigned long)matchn)) {
59409 + write_lock(&gr_inode_lock);
59410 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59411 + write_unlock(&gr_inode_lock);
59412 + }
59413 + preempt_enable();
59414 +
59415 + return;
59416 +}
59417 +
59418 +void
59419 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59420 + struct dentry *old_dentry,
59421 + struct dentry *new_dentry,
59422 + struct vfsmount *mnt, const __u8 replace)
59423 +{
59424 + struct name_entry *matchn;
59425 + struct inodev_entry *inodev;
59426 + struct inode *inode = new_dentry->d_inode;
59427 + ino_t oldinode = old_dentry->d_inode->i_ino;
59428 + dev_t olddev = __get_dev(old_dentry);
59429 +
59430 + /* vfs_rename swaps the name and parent link for old_dentry and
59431 + new_dentry
59432 + at this point, old_dentry has the new name, parent link, and inode
59433 + for the renamed file
59434 + if a file is being replaced by a rename, new_dentry has the inode
59435 + and name for the replaced file
59436 + */
59437 +
59438 + if (unlikely(!(gr_status & GR_READY)))
59439 + return;
59440 +
59441 + preempt_disable();
59442 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59443 +
59444 + /* we wouldn't have to check d_inode if it weren't for
59445 + NFS silly-renaming
59446 + */
59447 +
59448 + write_lock(&gr_inode_lock);
59449 + if (unlikely(replace && inode)) {
59450 + ino_t newinode = inode->i_ino;
59451 + dev_t newdev = __get_dev(new_dentry);
59452 + inodev = lookup_inodev_entry(newinode, newdev);
59453 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59454 + do_handle_delete(inodev, newinode, newdev);
59455 + }
59456 +
59457 + inodev = lookup_inodev_entry(oldinode, olddev);
59458 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59459 + do_handle_delete(inodev, oldinode, olddev);
59460 +
59461 + if (unlikely((unsigned long)matchn))
59462 + do_handle_create(matchn, old_dentry, mnt);
59463 +
59464 + write_unlock(&gr_inode_lock);
59465 + preempt_enable();
59466 +
59467 + return;
59468 +}
59469 +
59470 +static int
59471 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59472 + unsigned char **sum)
59473 +{
59474 + struct acl_role_label *r;
59475 + struct role_allowed_ip *ipp;
59476 + struct role_transition *trans;
59477 + unsigned int i;
59478 + int found = 0;
59479 + u32 curr_ip = current->signal->curr_ip;
59480 +
59481 + current->signal->saved_ip = curr_ip;
59482 +
59483 + /* check transition table */
59484 +
59485 + for (trans = current->role->transitions; trans; trans = trans->next) {
59486 + if (!strcmp(rolename, trans->rolename)) {
59487 + found = 1;
59488 + break;
59489 + }
59490 + }
59491 +
59492 + if (!found)
59493 + return 0;
59494 +
59495 + /* handle special roles that do not require authentication
59496 + and check ip */
59497 +
59498 + FOR_EACH_ROLE_START(r)
59499 + if (!strcmp(rolename, r->rolename) &&
59500 + (r->roletype & GR_ROLE_SPECIAL)) {
59501 + found = 0;
59502 + if (r->allowed_ips != NULL) {
59503 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59504 + if ((ntohl(curr_ip) & ipp->netmask) ==
59505 + (ntohl(ipp->addr) & ipp->netmask))
59506 + found = 1;
59507 + }
59508 + } else
59509 + found = 2;
59510 + if (!found)
59511 + return 0;
59512 +
59513 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59514 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59515 + *salt = NULL;
59516 + *sum = NULL;
59517 + return 1;
59518 + }
59519 + }
59520 + FOR_EACH_ROLE_END(r)
59521 +
59522 + for (i = 0; i < num_sprole_pws; i++) {
59523 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59524 + *salt = acl_special_roles[i]->salt;
59525 + *sum = acl_special_roles[i]->sum;
59526 + return 1;
59527 + }
59528 + }
59529 +
59530 + return 0;
59531 +}
59532 +
59533 +static void
59534 +assign_special_role(char *rolename)
59535 +{
59536 + struct acl_object_label *obj;
59537 + struct acl_role_label *r;
59538 + struct acl_role_label *assigned = NULL;
59539 + struct task_struct *tsk;
59540 + struct file *filp;
59541 +
59542 + FOR_EACH_ROLE_START(r)
59543 + if (!strcmp(rolename, r->rolename) &&
59544 + (r->roletype & GR_ROLE_SPECIAL)) {
59545 + assigned = r;
59546 + break;
59547 + }
59548 + FOR_EACH_ROLE_END(r)
59549 +
59550 + if (!assigned)
59551 + return;
59552 +
59553 + read_lock(&tasklist_lock);
59554 + read_lock(&grsec_exec_file_lock);
59555 +
59556 + tsk = current->real_parent;
59557 + if (tsk == NULL)
59558 + goto out_unlock;
59559 +
59560 + filp = tsk->exec_file;
59561 + if (filp == NULL)
59562 + goto out_unlock;
59563 +
59564 + tsk->is_writable = 0;
59565 +
59566 + tsk->acl_sp_role = 1;
59567 + tsk->acl_role_id = ++acl_sp_role_value;
59568 + tsk->role = assigned;
59569 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59570 +
59571 + /* ignore additional mmap checks for processes that are writable
59572 + by the default ACL */
59573 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59574 + if (unlikely(obj->mode & GR_WRITE))
59575 + tsk->is_writable = 1;
59576 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59577 + if (unlikely(obj->mode & GR_WRITE))
59578 + tsk->is_writable = 1;
59579 +
59580 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59581 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59582 +#endif
59583 +
59584 +out_unlock:
59585 + read_unlock(&grsec_exec_file_lock);
59586 + read_unlock(&tasklist_lock);
59587 + return;
59588 +}
59589 +
59590 +int gr_check_secure_terminal(struct task_struct *task)
59591 +{
59592 + struct task_struct *p, *p2, *p3;
59593 + struct files_struct *files;
59594 + struct fdtable *fdt;
59595 + struct file *our_file = NULL, *file;
59596 + int i;
59597 +
59598 + if (task->signal->tty == NULL)
59599 + return 1;
59600 +
59601 + files = get_files_struct(task);
59602 + if (files != NULL) {
59603 + rcu_read_lock();
59604 + fdt = files_fdtable(files);
59605 + for (i=0; i < fdt->max_fds; i++) {
59606 + file = fcheck_files(files, i);
59607 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59608 + get_file(file);
59609 + our_file = file;
59610 + }
59611 + }
59612 + rcu_read_unlock();
59613 + put_files_struct(files);
59614 + }
59615 +
59616 + if (our_file == NULL)
59617 + return 1;
59618 +
59619 + read_lock(&tasklist_lock);
59620 + do_each_thread(p2, p) {
59621 + files = get_files_struct(p);
59622 + if (files == NULL ||
59623 + (p->signal && p->signal->tty == task->signal->tty)) {
59624 + if (files != NULL)
59625 + put_files_struct(files);
59626 + continue;
59627 + }
59628 + rcu_read_lock();
59629 + fdt = files_fdtable(files);
59630 + for (i=0; i < fdt->max_fds; i++) {
59631 + file = fcheck_files(files, i);
59632 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59633 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59634 + p3 = task;
59635 + while (p3->pid > 0) {
59636 + if (p3 == p)
59637 + break;
59638 + p3 = p3->real_parent;
59639 + }
59640 + if (p3 == p)
59641 + break;
59642 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59643 + gr_handle_alertkill(p);
59644 + rcu_read_unlock();
59645 + put_files_struct(files);
59646 + read_unlock(&tasklist_lock);
59647 + fput(our_file);
59648 + return 0;
59649 + }
59650 + }
59651 + rcu_read_unlock();
59652 + put_files_struct(files);
59653 + } while_each_thread(p2, p);
59654 + read_unlock(&tasklist_lock);
59655 +
59656 + fput(our_file);
59657 + return 1;
59658 +}
59659 +
59660 +ssize_t
59661 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59662 +{
59663 + struct gr_arg_wrapper uwrap;
59664 + unsigned char *sprole_salt = NULL;
59665 + unsigned char *sprole_sum = NULL;
59666 + int error = sizeof (struct gr_arg_wrapper);
59667 + int error2 = 0;
59668 +
59669 + mutex_lock(&gr_dev_mutex);
59670 +
59671 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59672 + error = -EPERM;
59673 + goto out;
59674 + }
59675 +
59676 + if (count != sizeof (struct gr_arg_wrapper)) {
59677 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59678 + error = -EINVAL;
59679 + goto out;
59680 + }
59681 +
59682 +
59683 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59684 + gr_auth_expires = 0;
59685 + gr_auth_attempts = 0;
59686 + }
59687 +
59688 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59689 + error = -EFAULT;
59690 + goto out;
59691 + }
59692 +
59693 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59694 + error = -EINVAL;
59695 + goto out;
59696 + }
59697 +
59698 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59699 + error = -EFAULT;
59700 + goto out;
59701 + }
59702 +
59703 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59704 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59705 + time_after(gr_auth_expires, get_seconds())) {
59706 + error = -EBUSY;
59707 + goto out;
59708 + }
59709 +
59710 + /* if non-root trying to do anything other than use a special role,
59711 + do not attempt authentication, do not count towards authentication
59712 + locking
59713 + */
59714 +
59715 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59716 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59717 + current_uid()) {
59718 + error = -EPERM;
59719 + goto out;
59720 + }
59721 +
59722 + /* ensure pw and special role name are null terminated */
59723 +
59724 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59725 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59726 +
59727 + /* Okay.
59728 + * We have our enough of the argument structure..(we have yet
59729 + * to copy_from_user the tables themselves) . Copy the tables
59730 + * only if we need them, i.e. for loading operations. */
59731 +
59732 + switch (gr_usermode->mode) {
59733 + case GR_STATUS:
59734 + if (gr_status & GR_READY) {
59735 + error = 1;
59736 + if (!gr_check_secure_terminal(current))
59737 + error = 3;
59738 + } else
59739 + error = 2;
59740 + goto out;
59741 + case GR_SHUTDOWN:
59742 + if ((gr_status & GR_READY)
59743 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59744 + pax_open_kernel();
59745 + gr_status &= ~GR_READY;
59746 + pax_close_kernel();
59747 +
59748 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59749 + free_variables();
59750 + memset(gr_usermode, 0, sizeof (struct gr_arg));
59751 + memset(gr_system_salt, 0, GR_SALT_LEN);
59752 + memset(gr_system_sum, 0, GR_SHA_LEN);
59753 + } else if (gr_status & GR_READY) {
59754 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59755 + error = -EPERM;
59756 + } else {
59757 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59758 + error = -EAGAIN;
59759 + }
59760 + break;
59761 + case GR_ENABLE:
59762 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59763 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59764 + else {
59765 + if (gr_status & GR_READY)
59766 + error = -EAGAIN;
59767 + else
59768 + error = error2;
59769 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59770 + }
59771 + break;
59772 + case GR_RELOAD:
59773 + if (!(gr_status & GR_READY)) {
59774 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59775 + error = -EAGAIN;
59776 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59777 + lock_kernel();
59778 +
59779 + pax_open_kernel();
59780 + gr_status &= ~GR_READY;
59781 + pax_close_kernel();
59782 +
59783 + free_variables();
59784 + if (!(error2 = gracl_init(gr_usermode))) {
59785 + unlock_kernel();
59786 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59787 + } else {
59788 + unlock_kernel();
59789 + error = error2;
59790 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59791 + }
59792 + } else {
59793 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59794 + error = -EPERM;
59795 + }
59796 + break;
59797 + case GR_SEGVMOD:
59798 + if (unlikely(!(gr_status & GR_READY))) {
59799 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59800 + error = -EAGAIN;
59801 + break;
59802 + }
59803 +
59804 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59805 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59806 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59807 + struct acl_subject_label *segvacl;
59808 + segvacl =
59809 + lookup_acl_subj_label(gr_usermode->segv_inode,
59810 + gr_usermode->segv_device,
59811 + current->role);
59812 + if (segvacl) {
59813 + segvacl->crashes = 0;
59814 + segvacl->expires = 0;
59815 + }
59816 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59817 + gr_remove_uid(gr_usermode->segv_uid);
59818 + }
59819 + } else {
59820 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59821 + error = -EPERM;
59822 + }
59823 + break;
59824 + case GR_SPROLE:
59825 + case GR_SPROLEPAM:
59826 + if (unlikely(!(gr_status & GR_READY))) {
59827 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59828 + error = -EAGAIN;
59829 + break;
59830 + }
59831 +
59832 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59833 + current->role->expires = 0;
59834 + current->role->auth_attempts = 0;
59835 + }
59836 +
59837 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59838 + time_after(current->role->expires, get_seconds())) {
59839 + error = -EBUSY;
59840 + goto out;
59841 + }
59842 +
59843 + if (lookup_special_role_auth
59844 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59845 + && ((!sprole_salt && !sprole_sum)
59846 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59847 + char *p = "";
59848 + assign_special_role(gr_usermode->sp_role);
59849 + read_lock(&tasklist_lock);
59850 + if (current->real_parent)
59851 + p = current->real_parent->role->rolename;
59852 + read_unlock(&tasklist_lock);
59853 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59854 + p, acl_sp_role_value);
59855 + } else {
59856 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59857 + error = -EPERM;
59858 + if(!(current->role->auth_attempts++))
59859 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59860 +
59861 + goto out;
59862 + }
59863 + break;
59864 + case GR_UNSPROLE:
59865 + if (unlikely(!(gr_status & GR_READY))) {
59866 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59867 + error = -EAGAIN;
59868 + break;
59869 + }
59870 +
59871 + if (current->role->roletype & GR_ROLE_SPECIAL) {
59872 + char *p = "";
59873 + int i = 0;
59874 +
59875 + read_lock(&tasklist_lock);
59876 + if (current->real_parent) {
59877 + p = current->real_parent->role->rolename;
59878 + i = current->real_parent->acl_role_id;
59879 + }
59880 + read_unlock(&tasklist_lock);
59881 +
59882 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59883 + gr_set_acls(1);
59884 + } else {
59885 + error = -EPERM;
59886 + goto out;
59887 + }
59888 + break;
59889 + default:
59890 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59891 + error = -EINVAL;
59892 + break;
59893 + }
59894 +
59895 + if (error != -EPERM)
59896 + goto out;
59897 +
59898 + if(!(gr_auth_attempts++))
59899 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59900 +
59901 + out:
59902 + mutex_unlock(&gr_dev_mutex);
59903 + return error;
59904 +}
59905 +
59906 +/* must be called with
59907 + rcu_read_lock();
59908 + read_lock(&tasklist_lock);
59909 + read_lock(&grsec_exec_file_lock);
59910 +*/
59911 +int gr_apply_subject_to_task(struct task_struct *task)
59912 +{
59913 + struct acl_object_label *obj;
59914 + char *tmpname;
59915 + struct acl_subject_label *tmpsubj;
59916 + struct file *filp;
59917 + struct name_entry *nmatch;
59918 +
59919 + filp = task->exec_file;
59920 + if (filp == NULL)
59921 + return 0;
59922 +
59923 + /* the following is to apply the correct subject
59924 + on binaries running when the RBAC system
59925 + is enabled, when the binaries have been
59926 + replaced or deleted since their execution
59927 + -----
59928 + when the RBAC system starts, the inode/dev
59929 + from exec_file will be one the RBAC system
59930 + is unaware of. It only knows the inode/dev
59931 + of the present file on disk, or the absence
59932 + of it.
59933 + */
59934 + preempt_disable();
59935 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59936 +
59937 + nmatch = lookup_name_entry(tmpname);
59938 + preempt_enable();
59939 + tmpsubj = NULL;
59940 + if (nmatch) {
59941 + if (nmatch->deleted)
59942 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59943 + else
59944 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59945 + if (tmpsubj != NULL)
59946 + task->acl = tmpsubj;
59947 + }
59948 + if (tmpsubj == NULL)
59949 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59950 + task->role);
59951 + if (task->acl) {
59952 + task->is_writable = 0;
59953 + /* ignore additional mmap checks for processes that are writable
59954 + by the default ACL */
59955 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59956 + if (unlikely(obj->mode & GR_WRITE))
59957 + task->is_writable = 1;
59958 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59959 + if (unlikely(obj->mode & GR_WRITE))
59960 + task->is_writable = 1;
59961 +
59962 + gr_set_proc_res(task);
59963 +
59964 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59965 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59966 +#endif
59967 + } else {
59968 + return 1;
59969 + }
59970 +
59971 + return 0;
59972 +}
59973 +
59974 +int
59975 +gr_set_acls(const int type)
59976 +{
59977 + struct task_struct *task, *task2;
59978 + struct acl_role_label *role = current->role;
59979 + __u16 acl_role_id = current->acl_role_id;
59980 + const struct cred *cred;
59981 + int ret;
59982 +
59983 + rcu_read_lock();
59984 + read_lock(&tasklist_lock);
59985 + read_lock(&grsec_exec_file_lock);
59986 + do_each_thread(task2, task) {
59987 + /* check to see if we're called from the exit handler,
59988 + if so, only replace ACLs that have inherited the admin
59989 + ACL */
59990 +
59991 + if (type && (task->role != role ||
59992 + task->acl_role_id != acl_role_id))
59993 + continue;
59994 +
59995 + task->acl_role_id = 0;
59996 + task->acl_sp_role = 0;
59997 +
59998 + if (task->exec_file) {
59999 + cred = __task_cred(task);
60000 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
60001 +
60002 + ret = gr_apply_subject_to_task(task);
60003 + if (ret) {
60004 + read_unlock(&grsec_exec_file_lock);
60005 + read_unlock(&tasklist_lock);
60006 + rcu_read_unlock();
60007 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
60008 + return ret;
60009 + }
60010 + } else {
60011 + // it's a kernel process
60012 + task->role = kernel_role;
60013 + task->acl = kernel_role->root_label;
60014 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60015 + task->acl->mode &= ~GR_PROCFIND;
60016 +#endif
60017 + }
60018 + } while_each_thread(task2, task);
60019 + read_unlock(&grsec_exec_file_lock);
60020 + read_unlock(&tasklist_lock);
60021 + rcu_read_unlock();
60022 +
60023 + return 0;
60024 +}
60025 +
60026 +void
60027 +gr_learn_resource(const struct task_struct *task,
60028 + const int res, const unsigned long wanted, const int gt)
60029 +{
60030 + struct acl_subject_label *acl;
60031 + const struct cred *cred;
60032 +
60033 + if (unlikely((gr_status & GR_READY) &&
60034 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60035 + goto skip_reslog;
60036 +
60037 +#ifdef CONFIG_GRKERNSEC_RESLOG
60038 + gr_log_resource(task, res, wanted, gt);
60039 +#endif
60040 + skip_reslog:
60041 +
60042 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60043 + return;
60044 +
60045 + acl = task->acl;
60046 +
60047 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60048 + !(acl->resmask & (1 << (unsigned short) res))))
60049 + return;
60050 +
60051 + if (wanted >= acl->res[res].rlim_cur) {
60052 + unsigned long res_add;
60053 +
60054 + res_add = wanted;
60055 + switch (res) {
60056 + case RLIMIT_CPU:
60057 + res_add += GR_RLIM_CPU_BUMP;
60058 + break;
60059 + case RLIMIT_FSIZE:
60060 + res_add += GR_RLIM_FSIZE_BUMP;
60061 + break;
60062 + case RLIMIT_DATA:
60063 + res_add += GR_RLIM_DATA_BUMP;
60064 + break;
60065 + case RLIMIT_STACK:
60066 + res_add += GR_RLIM_STACK_BUMP;
60067 + break;
60068 + case RLIMIT_CORE:
60069 + res_add += GR_RLIM_CORE_BUMP;
60070 + break;
60071 + case RLIMIT_RSS:
60072 + res_add += GR_RLIM_RSS_BUMP;
60073 + break;
60074 + case RLIMIT_NPROC:
60075 + res_add += GR_RLIM_NPROC_BUMP;
60076 + break;
60077 + case RLIMIT_NOFILE:
60078 + res_add += GR_RLIM_NOFILE_BUMP;
60079 + break;
60080 + case RLIMIT_MEMLOCK:
60081 + res_add += GR_RLIM_MEMLOCK_BUMP;
60082 + break;
60083 + case RLIMIT_AS:
60084 + res_add += GR_RLIM_AS_BUMP;
60085 + break;
60086 + case RLIMIT_LOCKS:
60087 + res_add += GR_RLIM_LOCKS_BUMP;
60088 + break;
60089 + case RLIMIT_SIGPENDING:
60090 + res_add += GR_RLIM_SIGPENDING_BUMP;
60091 + break;
60092 + case RLIMIT_MSGQUEUE:
60093 + res_add += GR_RLIM_MSGQUEUE_BUMP;
60094 + break;
60095 + case RLIMIT_NICE:
60096 + res_add += GR_RLIM_NICE_BUMP;
60097 + break;
60098 + case RLIMIT_RTPRIO:
60099 + res_add += GR_RLIM_RTPRIO_BUMP;
60100 + break;
60101 + case RLIMIT_RTTIME:
60102 + res_add += GR_RLIM_RTTIME_BUMP;
60103 + break;
60104 + }
60105 +
60106 + acl->res[res].rlim_cur = res_add;
60107 +
60108 + if (wanted > acl->res[res].rlim_max)
60109 + acl->res[res].rlim_max = res_add;
60110 +
60111 + /* only log the subject filename, since resource logging is supported for
60112 + single-subject learning only */
60113 + rcu_read_lock();
60114 + cred = __task_cred(task);
60115 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60116 + task->role->roletype, cred->uid, cred->gid, acl->filename,
60117 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60118 + "", (unsigned long) res, &task->signal->saved_ip);
60119 + rcu_read_unlock();
60120 + }
60121 +
60122 + return;
60123 +}
60124 +
60125 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60126 +void
60127 +pax_set_initial_flags(struct linux_binprm *bprm)
60128 +{
60129 + struct task_struct *task = current;
60130 + struct acl_subject_label *proc;
60131 + unsigned long flags;
60132 +
60133 + if (unlikely(!(gr_status & GR_READY)))
60134 + return;
60135 +
60136 + flags = pax_get_flags(task);
60137 +
60138 + proc = task->acl;
60139 +
60140 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60141 + flags &= ~MF_PAX_PAGEEXEC;
60142 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60143 + flags &= ~MF_PAX_SEGMEXEC;
60144 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60145 + flags &= ~MF_PAX_RANDMMAP;
60146 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60147 + flags &= ~MF_PAX_EMUTRAMP;
60148 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60149 + flags &= ~MF_PAX_MPROTECT;
60150 +
60151 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60152 + flags |= MF_PAX_PAGEEXEC;
60153 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60154 + flags |= MF_PAX_SEGMEXEC;
60155 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60156 + flags |= MF_PAX_RANDMMAP;
60157 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60158 + flags |= MF_PAX_EMUTRAMP;
60159 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60160 + flags |= MF_PAX_MPROTECT;
60161 +
60162 + pax_set_flags(task, flags);
60163 +
60164 + return;
60165 +}
60166 +#endif
60167 +
60168 +#ifdef CONFIG_SYSCTL
60169 +/* Eric Biederman likes breaking userland ABI and every inode-based security
60170 + system to save 35kb of memory */
60171 +
60172 +/* we modify the passed in filename, but adjust it back before returning */
60173 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
60174 +{
60175 + struct name_entry *nmatch;
60176 + char *p, *lastp = NULL;
60177 + struct acl_object_label *obj = NULL, *tmp;
60178 + struct acl_subject_label *tmpsubj;
60179 + char c = '\0';
60180 +
60181 + read_lock(&gr_inode_lock);
60182 +
60183 + p = name + len - 1;
60184 + do {
60185 + nmatch = lookup_name_entry(name);
60186 + if (lastp != NULL)
60187 + *lastp = c;
60188 +
60189 + if (nmatch == NULL)
60190 + goto next_component;
60191 + tmpsubj = current->acl;
60192 + do {
60193 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
60194 + if (obj != NULL) {
60195 + tmp = obj->globbed;
60196 + while (tmp) {
60197 + if (!glob_match(tmp->filename, name)) {
60198 + obj = tmp;
60199 + goto found_obj;
60200 + }
60201 + tmp = tmp->next;
60202 + }
60203 + goto found_obj;
60204 + }
60205 + } while ((tmpsubj = tmpsubj->parent_subject));
60206 +next_component:
60207 + /* end case */
60208 + if (p == name)
60209 + break;
60210 +
60211 + while (*p != '/')
60212 + p--;
60213 + if (p == name)
60214 + lastp = p + 1;
60215 + else {
60216 + lastp = p;
60217 + p--;
60218 + }
60219 + c = *lastp;
60220 + *lastp = '\0';
60221 + } while (1);
60222 +found_obj:
60223 + read_unlock(&gr_inode_lock);
60224 + /* obj returned will always be non-null */
60225 + return obj;
60226 +}
60227 +
60228 +/* returns 0 when allowing, non-zero on error
60229 + op of 0 is used for readdir, so we don't log the names of hidden files
60230 +*/
60231 +__u32
60232 +gr_handle_sysctl(const struct ctl_table *table, const int op)
60233 +{
60234 + ctl_table *tmp;
60235 + const char *proc_sys = "/proc/sys";
60236 + char *path;
60237 + struct acl_object_label *obj;
60238 + unsigned short len = 0, pos = 0, depth = 0, i;
60239 + __u32 err = 0;
60240 + __u32 mode = 0;
60241 +
60242 + if (unlikely(!(gr_status & GR_READY)))
60243 + return 0;
60244 +
60245 + /* for now, ignore operations on non-sysctl entries if it's not a
60246 + readdir*/
60247 + if (table->child != NULL && op != 0)
60248 + return 0;
60249 +
60250 + mode |= GR_FIND;
60251 + /* it's only a read if it's an entry, read on dirs is for readdir */
60252 + if (op & MAY_READ)
60253 + mode |= GR_READ;
60254 + if (op & MAY_WRITE)
60255 + mode |= GR_WRITE;
60256 +
60257 + preempt_disable();
60258 +
60259 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60260 +
60261 + /* it's only a read/write if it's an actual entry, not a dir
60262 + (which are opened for readdir)
60263 + */
60264 +
60265 + /* convert the requested sysctl entry into a pathname */
60266 +
60267 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60268 + len += strlen(tmp->procname);
60269 + len++;
60270 + depth++;
60271 + }
60272 +
60273 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60274 + /* deny */
60275 + goto out;
60276 + }
60277 +
60278 + memset(path, 0, PAGE_SIZE);
60279 +
60280 + memcpy(path, proc_sys, strlen(proc_sys));
60281 +
60282 + pos += strlen(proc_sys);
60283 +
60284 + for (; depth > 0; depth--) {
60285 + path[pos] = '/';
60286 + pos++;
60287 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60288 + if (depth == i) {
60289 + memcpy(path + pos, tmp->procname,
60290 + strlen(tmp->procname));
60291 + pos += strlen(tmp->procname);
60292 + }
60293 + i++;
60294 + }
60295 + }
60296 +
60297 + obj = gr_lookup_by_name(path, pos);
60298 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60299 +
60300 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60301 + ((err & mode) != mode))) {
60302 + __u32 new_mode = mode;
60303 +
60304 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60305 +
60306 + err = 0;
60307 + gr_log_learn_sysctl(path, new_mode);
60308 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60309 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60310 + err = -ENOENT;
60311 + } else if (!(err & GR_FIND)) {
60312 + err = -ENOENT;
60313 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60314 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60315 + path, (mode & GR_READ) ? " reading" : "",
60316 + (mode & GR_WRITE) ? " writing" : "");
60317 + err = -EACCES;
60318 + } else if ((err & mode) != mode) {
60319 + err = -EACCES;
60320 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60321 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60322 + path, (mode & GR_READ) ? " reading" : "",
60323 + (mode & GR_WRITE) ? " writing" : "");
60324 + err = 0;
60325 + } else
60326 + err = 0;
60327 +
60328 + out:
60329 + preempt_enable();
60330 +
60331 + return err;
60332 +}
60333 +#endif
60334 +
60335 +int
60336 +gr_handle_proc_ptrace(struct task_struct *task)
60337 +{
60338 + struct file *filp;
60339 + struct task_struct *tmp = task;
60340 + struct task_struct *curtemp = current;
60341 + __u32 retmode;
60342 +
60343 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60344 + if (unlikely(!(gr_status & GR_READY)))
60345 + return 0;
60346 +#endif
60347 +
60348 + read_lock(&tasklist_lock);
60349 + read_lock(&grsec_exec_file_lock);
60350 + filp = task->exec_file;
60351 +
60352 + while (tmp->pid > 0) {
60353 + if (tmp == curtemp)
60354 + break;
60355 + tmp = tmp->real_parent;
60356 + }
60357 +
60358 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60359 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60360 + read_unlock(&grsec_exec_file_lock);
60361 + read_unlock(&tasklist_lock);
60362 + return 1;
60363 + }
60364 +
60365 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60366 + if (!(gr_status & GR_READY)) {
60367 + read_unlock(&grsec_exec_file_lock);
60368 + read_unlock(&tasklist_lock);
60369 + return 0;
60370 + }
60371 +#endif
60372 +
60373 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60374 + read_unlock(&grsec_exec_file_lock);
60375 + read_unlock(&tasklist_lock);
60376 +
60377 + if (retmode & GR_NOPTRACE)
60378 + return 1;
60379 +
60380 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60381 + && (current->acl != task->acl || (current->acl != current->role->root_label
60382 + && current->pid != task->pid)))
60383 + return 1;
60384 +
60385 + return 0;
60386 +}
60387 +
60388 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60389 +{
60390 + if (unlikely(!(gr_status & GR_READY)))
60391 + return;
60392 +
60393 + if (!(current->role->roletype & GR_ROLE_GOD))
60394 + return;
60395 +
60396 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60397 + p->role->rolename, gr_task_roletype_to_char(p),
60398 + p->acl->filename);
60399 +}
60400 +
60401 +int
60402 +gr_handle_ptrace(struct task_struct *task, const long request)
60403 +{
60404 + struct task_struct *tmp = task;
60405 + struct task_struct *curtemp = current;
60406 + __u32 retmode;
60407 +
60408 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60409 + if (unlikely(!(gr_status & GR_READY)))
60410 + return 0;
60411 +#endif
60412 +
60413 + read_lock(&tasklist_lock);
60414 + while (tmp->pid > 0) {
60415 + if (tmp == curtemp)
60416 + break;
60417 + tmp = tmp->real_parent;
60418 + }
60419 +
60420 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60421 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60422 + read_unlock(&tasklist_lock);
60423 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60424 + return 1;
60425 + }
60426 + read_unlock(&tasklist_lock);
60427 +
60428 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60429 + if (!(gr_status & GR_READY))
60430 + return 0;
60431 +#endif
60432 +
60433 + read_lock(&grsec_exec_file_lock);
60434 + if (unlikely(!task->exec_file)) {
60435 + read_unlock(&grsec_exec_file_lock);
60436 + return 0;
60437 + }
60438 +
60439 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60440 + read_unlock(&grsec_exec_file_lock);
60441 +
60442 + if (retmode & GR_NOPTRACE) {
60443 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60444 + return 1;
60445 + }
60446 +
60447 + if (retmode & GR_PTRACERD) {
60448 + switch (request) {
60449 + case PTRACE_POKETEXT:
60450 + case PTRACE_POKEDATA:
60451 + case PTRACE_POKEUSR:
60452 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60453 + case PTRACE_SETREGS:
60454 + case PTRACE_SETFPREGS:
60455 +#endif
60456 +#ifdef CONFIG_X86
60457 + case PTRACE_SETFPXREGS:
60458 +#endif
60459 +#ifdef CONFIG_ALTIVEC
60460 + case PTRACE_SETVRREGS:
60461 +#endif
60462 + return 1;
60463 + default:
60464 + return 0;
60465 + }
60466 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
60467 + !(current->role->roletype & GR_ROLE_GOD) &&
60468 + (current->acl != task->acl)) {
60469 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60470 + return 1;
60471 + }
60472 +
60473 + return 0;
60474 +}
60475 +
60476 +static int is_writable_mmap(const struct file *filp)
60477 +{
60478 + struct task_struct *task = current;
60479 + struct acl_object_label *obj, *obj2;
60480 +
60481 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60482 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60483 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60484 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60485 + task->role->root_label);
60486 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60487 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60488 + return 1;
60489 + }
60490 + }
60491 + return 0;
60492 +}
60493 +
60494 +int
60495 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60496 +{
60497 + __u32 mode;
60498 +
60499 + if (unlikely(!file || !(prot & PROT_EXEC)))
60500 + return 1;
60501 +
60502 + if (is_writable_mmap(file))
60503 + return 0;
60504 +
60505 + mode =
60506 + gr_search_file(file->f_path.dentry,
60507 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60508 + file->f_path.mnt);
60509 +
60510 + if (!gr_tpe_allow(file))
60511 + return 0;
60512 +
60513 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60514 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60515 + return 0;
60516 + } else if (unlikely(!(mode & GR_EXEC))) {
60517 + return 0;
60518 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60519 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60520 + return 1;
60521 + }
60522 +
60523 + return 1;
60524 +}
60525 +
60526 +int
60527 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60528 +{
60529 + __u32 mode;
60530 +
60531 + if (unlikely(!file || !(prot & PROT_EXEC)))
60532 + return 1;
60533 +
60534 + if (is_writable_mmap(file))
60535 + return 0;
60536 +
60537 + mode =
60538 + gr_search_file(file->f_path.dentry,
60539 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60540 + file->f_path.mnt);
60541 +
60542 + if (!gr_tpe_allow(file))
60543 + return 0;
60544 +
60545 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60546 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60547 + return 0;
60548 + } else if (unlikely(!(mode & GR_EXEC))) {
60549 + return 0;
60550 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60551 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60552 + return 1;
60553 + }
60554 +
60555 + return 1;
60556 +}
60557 +
60558 +void
60559 +gr_acl_handle_psacct(struct task_struct *task, const long code)
60560 +{
60561 + unsigned long runtime;
60562 + unsigned long cputime;
60563 + unsigned int wday, cday;
60564 + __u8 whr, chr;
60565 + __u8 wmin, cmin;
60566 + __u8 wsec, csec;
60567 + struct timespec timeval;
60568 +
60569 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60570 + !(task->acl->mode & GR_PROCACCT)))
60571 + return;
60572 +
60573 + do_posix_clock_monotonic_gettime(&timeval);
60574 + runtime = timeval.tv_sec - task->start_time.tv_sec;
60575 + wday = runtime / (3600 * 24);
60576 + runtime -= wday * (3600 * 24);
60577 + whr = runtime / 3600;
60578 + runtime -= whr * 3600;
60579 + wmin = runtime / 60;
60580 + runtime -= wmin * 60;
60581 + wsec = runtime;
60582 +
60583 + cputime = (task->utime + task->stime) / HZ;
60584 + cday = cputime / (3600 * 24);
60585 + cputime -= cday * (3600 * 24);
60586 + chr = cputime / 3600;
60587 + cputime -= chr * 3600;
60588 + cmin = cputime / 60;
60589 + cputime -= cmin * 60;
60590 + csec = cputime;
60591 +
60592 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60593 +
60594 + return;
60595 +}
60596 +
60597 +void gr_set_kernel_label(struct task_struct *task)
60598 +{
60599 + if (gr_status & GR_READY) {
60600 + task->role = kernel_role;
60601 + task->acl = kernel_role->root_label;
60602 + }
60603 + return;
60604 +}
60605 +
60606 +#ifdef CONFIG_TASKSTATS
60607 +int gr_is_taskstats_denied(int pid)
60608 +{
60609 + struct task_struct *task;
60610 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60611 + const struct cred *cred;
60612 +#endif
60613 + int ret = 0;
60614 +
60615 + /* restrict taskstats viewing to un-chrooted root users
60616 + who have the 'view' subject flag if the RBAC system is enabled
60617 + */
60618 +
60619 + rcu_read_lock();
60620 + read_lock(&tasklist_lock);
60621 + task = find_task_by_vpid(pid);
60622 + if (task) {
60623 +#ifdef CONFIG_GRKERNSEC_CHROOT
60624 + if (proc_is_chrooted(task))
60625 + ret = -EACCES;
60626 +#endif
60627 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60628 + cred = __task_cred(task);
60629 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60630 + if (cred->uid != 0)
60631 + ret = -EACCES;
60632 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60633 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60634 + ret = -EACCES;
60635 +#endif
60636 +#endif
60637 + if (gr_status & GR_READY) {
60638 + if (!(task->acl->mode & GR_VIEW))
60639 + ret = -EACCES;
60640 + }
60641 + } else
60642 + ret = -ENOENT;
60643 +
60644 + read_unlock(&tasklist_lock);
60645 + rcu_read_unlock();
60646 +
60647 + return ret;
60648 +}
60649 +#endif
60650 +
60651 +/* AUXV entries are filled via a descendant of search_binary_handler
60652 + after we've already applied the subject for the target
60653 +*/
60654 +int gr_acl_enable_at_secure(void)
60655 +{
60656 + if (unlikely(!(gr_status & GR_READY)))
60657 + return 0;
60658 +
60659 + if (current->acl->mode & GR_ATSECURE)
60660 + return 1;
60661 +
60662 + return 0;
60663 +}
60664 +
60665 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60666 +{
60667 + struct task_struct *task = current;
60668 + struct dentry *dentry = file->f_path.dentry;
60669 + struct vfsmount *mnt = file->f_path.mnt;
60670 + struct acl_object_label *obj, *tmp;
60671 + struct acl_subject_label *subj;
60672 + unsigned int bufsize;
60673 + int is_not_root;
60674 + char *path;
60675 + dev_t dev = __get_dev(dentry);
60676 +
60677 + if (unlikely(!(gr_status & GR_READY)))
60678 + return 1;
60679 +
60680 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60681 + return 1;
60682 +
60683 + /* ignore Eric Biederman */
60684 + if (IS_PRIVATE(dentry->d_inode))
60685 + return 1;
60686 +
60687 + subj = task->acl;
60688 + do {
60689 + obj = lookup_acl_obj_label(ino, dev, subj);
60690 + if (obj != NULL)
60691 + return (obj->mode & GR_FIND) ? 1 : 0;
60692 + } while ((subj = subj->parent_subject));
60693 +
60694 + /* this is purely an optimization since we're looking for an object
60695 + for the directory we're doing a readdir on
60696 + if it's possible for any globbed object to match the entry we're
60697 + filling into the directory, then the object we find here will be
60698 + an anchor point with attached globbed objects
60699 + */
60700 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60701 + if (obj->globbed == NULL)
60702 + return (obj->mode & GR_FIND) ? 1 : 0;
60703 +
60704 + is_not_root = ((obj->filename[0] == '/') &&
60705 + (obj->filename[1] == '\0')) ? 0 : 1;
60706 + bufsize = PAGE_SIZE - namelen - is_not_root;
60707 +
60708 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
60709 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60710 + return 1;
60711 +
60712 + preempt_disable();
60713 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60714 + bufsize);
60715 +
60716 + bufsize = strlen(path);
60717 +
60718 + /* if base is "/", don't append an additional slash */
60719 + if (is_not_root)
60720 + *(path + bufsize) = '/';
60721 + memcpy(path + bufsize + is_not_root, name, namelen);
60722 + *(path + bufsize + namelen + is_not_root) = '\0';
60723 +
60724 + tmp = obj->globbed;
60725 + while (tmp) {
60726 + if (!glob_match(tmp->filename, path)) {
60727 + preempt_enable();
60728 + return (tmp->mode & GR_FIND) ? 1 : 0;
60729 + }
60730 + tmp = tmp->next;
60731 + }
60732 + preempt_enable();
60733 + return (obj->mode & GR_FIND) ? 1 : 0;
60734 +}
60735 +
60736 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60737 +EXPORT_SYMBOL(gr_acl_is_enabled);
60738 +#endif
60739 +EXPORT_SYMBOL(gr_learn_resource);
60740 +EXPORT_SYMBOL(gr_set_kernel_label);
60741 +#ifdef CONFIG_SECURITY
60742 +EXPORT_SYMBOL(gr_check_user_change);
60743 +EXPORT_SYMBOL(gr_check_group_change);
60744 +#endif
60745 +
60746 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60747 new file mode 100644
60748 index 0000000..34fefda
60749 --- /dev/null
60750 +++ b/grsecurity/gracl_alloc.c
60751 @@ -0,0 +1,105 @@
60752 +#include <linux/kernel.h>
60753 +#include <linux/mm.h>
60754 +#include <linux/slab.h>
60755 +#include <linux/vmalloc.h>
60756 +#include <linux/gracl.h>
60757 +#include <linux/grsecurity.h>
60758 +
60759 +static unsigned long alloc_stack_next = 1;
60760 +static unsigned long alloc_stack_size = 1;
60761 +static void **alloc_stack;
60762 +
60763 +static __inline__ int
60764 +alloc_pop(void)
60765 +{
60766 + if (alloc_stack_next == 1)
60767 + return 0;
60768 +
60769 + kfree(alloc_stack[alloc_stack_next - 2]);
60770 +
60771 + alloc_stack_next--;
60772 +
60773 + return 1;
60774 +}
60775 +
60776 +static __inline__ int
60777 +alloc_push(void *buf)
60778 +{
60779 + if (alloc_stack_next >= alloc_stack_size)
60780 + return 1;
60781 +
60782 + alloc_stack[alloc_stack_next - 1] = buf;
60783 +
60784 + alloc_stack_next++;
60785 +
60786 + return 0;
60787 +}
60788 +
60789 +void *
60790 +acl_alloc(unsigned long len)
60791 +{
60792 + void *ret = NULL;
60793 +
60794 + if (!len || len > PAGE_SIZE)
60795 + goto out;
60796 +
60797 + ret = kmalloc(len, GFP_KERNEL);
60798 +
60799 + if (ret) {
60800 + if (alloc_push(ret)) {
60801 + kfree(ret);
60802 + ret = NULL;
60803 + }
60804 + }
60805 +
60806 +out:
60807 + return ret;
60808 +}
60809 +
60810 +void *
60811 +acl_alloc_num(unsigned long num, unsigned long len)
60812 +{
60813 + if (!len || (num > (PAGE_SIZE / len)))
60814 + return NULL;
60815 +
60816 + return acl_alloc(num * len);
60817 +}
60818 +
60819 +void
60820 +acl_free_all(void)
60821 +{
60822 + if (gr_acl_is_enabled() || !alloc_stack)
60823 + return;
60824 +
60825 + while (alloc_pop()) ;
60826 +
60827 + if (alloc_stack) {
60828 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60829 + kfree(alloc_stack);
60830 + else
60831 + vfree(alloc_stack);
60832 + }
60833 +
60834 + alloc_stack = NULL;
60835 + alloc_stack_size = 1;
60836 + alloc_stack_next = 1;
60837 +
60838 + return;
60839 +}
60840 +
60841 +int
60842 +acl_alloc_stack_init(unsigned long size)
60843 +{
60844 + if ((size * sizeof (void *)) <= PAGE_SIZE)
60845 + alloc_stack =
60846 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60847 + else
60848 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
60849 +
60850 + alloc_stack_size = size;
60851 +
60852 + if (!alloc_stack)
60853 + return 0;
60854 + else
60855 + return 1;
60856 +}
60857 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60858 new file mode 100644
60859 index 0000000..955ddfb
60860 --- /dev/null
60861 +++ b/grsecurity/gracl_cap.c
60862 @@ -0,0 +1,101 @@
60863 +#include <linux/kernel.h>
60864 +#include <linux/module.h>
60865 +#include <linux/sched.h>
60866 +#include <linux/gracl.h>
60867 +#include <linux/grsecurity.h>
60868 +#include <linux/grinternal.h>
60869 +
60870 +extern const char *captab_log[];
60871 +extern int captab_log_entries;
60872 +
60873 +int
60874 +gr_acl_is_capable(const int cap)
60875 +{
60876 + struct task_struct *task = current;
60877 + const struct cred *cred = current_cred();
60878 + struct acl_subject_label *curracl;
60879 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60880 + kernel_cap_t cap_audit = __cap_empty_set;
60881 +
60882 + if (!gr_acl_is_enabled())
60883 + return 1;
60884 +
60885 + curracl = task->acl;
60886 +
60887 + cap_drop = curracl->cap_lower;
60888 + cap_mask = curracl->cap_mask;
60889 + cap_audit = curracl->cap_invert_audit;
60890 +
60891 + while ((curracl = curracl->parent_subject)) {
60892 + /* if the cap isn't specified in the current computed mask but is specified in the
60893 + current level subject, and is lowered in the current level subject, then add
60894 + it to the set of dropped capabilities
60895 + otherwise, add the current level subject's mask to the current computed mask
60896 + */
60897 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60898 + cap_raise(cap_mask, cap);
60899 + if (cap_raised(curracl->cap_lower, cap))
60900 + cap_raise(cap_drop, cap);
60901 + if (cap_raised(curracl->cap_invert_audit, cap))
60902 + cap_raise(cap_audit, cap);
60903 + }
60904 + }
60905 +
60906 + if (!cap_raised(cap_drop, cap)) {
60907 + if (cap_raised(cap_audit, cap))
60908 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60909 + return 1;
60910 + }
60911 +
60912 + curracl = task->acl;
60913 +
60914 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60915 + && cap_raised(cred->cap_effective, cap)) {
60916 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60917 + task->role->roletype, cred->uid,
60918 + cred->gid, task->exec_file ?
60919 + gr_to_filename(task->exec_file->f_path.dentry,
60920 + task->exec_file->f_path.mnt) : curracl->filename,
60921 + curracl->filename, 0UL,
60922 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60923 + return 1;
60924 + }
60925 +
60926 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60927 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60928 + return 0;
60929 +}
60930 +
60931 +int
60932 +gr_acl_is_capable_nolog(const int cap)
60933 +{
60934 + struct acl_subject_label *curracl;
60935 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60936 +
60937 + if (!gr_acl_is_enabled())
60938 + return 1;
60939 +
60940 + curracl = current->acl;
60941 +
60942 + cap_drop = curracl->cap_lower;
60943 + cap_mask = curracl->cap_mask;
60944 +
60945 + while ((curracl = curracl->parent_subject)) {
60946 + /* if the cap isn't specified in the current computed mask but is specified in the
60947 + current level subject, and is lowered in the current level subject, then add
60948 + it to the set of dropped capabilities
60949 + otherwise, add the current level subject's mask to the current computed mask
60950 + */
60951 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60952 + cap_raise(cap_mask, cap);
60953 + if (cap_raised(curracl->cap_lower, cap))
60954 + cap_raise(cap_drop, cap);
60955 + }
60956 + }
60957 +
60958 + if (!cap_raised(cap_drop, cap))
60959 + return 1;
60960 +
60961 + return 0;
60962 +}
60963 +
60964 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60965 new file mode 100644
60966 index 0000000..523e7e8
60967 --- /dev/null
60968 +++ b/grsecurity/gracl_fs.c
60969 @@ -0,0 +1,435 @@
60970 +#include <linux/kernel.h>
60971 +#include <linux/sched.h>
60972 +#include <linux/types.h>
60973 +#include <linux/fs.h>
60974 +#include <linux/file.h>
60975 +#include <linux/stat.h>
60976 +#include <linux/grsecurity.h>
60977 +#include <linux/grinternal.h>
60978 +#include <linux/gracl.h>
60979 +
60980 +umode_t
60981 +gr_acl_umask(void)
60982 +{
60983 + if (unlikely(!gr_acl_is_enabled()))
60984 + return 0;
60985 +
60986 + return current->role->umask;
60987 +}
60988 +
60989 +__u32
60990 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60991 + const struct vfsmount * mnt)
60992 +{
60993 + __u32 mode;
60994 +
60995 + if (unlikely(!dentry->d_inode))
60996 + return GR_FIND;
60997 +
60998 + mode =
60999 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61000 +
61001 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61002 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61003 + return mode;
61004 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61005 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61006 + return 0;
61007 + } else if (unlikely(!(mode & GR_FIND)))
61008 + return 0;
61009 +
61010 + return GR_FIND;
61011 +}
61012 +
61013 +__u32
61014 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61015 + int acc_mode)
61016 +{
61017 + __u32 reqmode = GR_FIND;
61018 + __u32 mode;
61019 +
61020 + if (unlikely(!dentry->d_inode))
61021 + return reqmode;
61022 +
61023 + if (acc_mode & MAY_APPEND)
61024 + reqmode |= GR_APPEND;
61025 + else if (acc_mode & MAY_WRITE)
61026 + reqmode |= GR_WRITE;
61027 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61028 + reqmode |= GR_READ;
61029 +
61030 + mode =
61031 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61032 + mnt);
61033 +
61034 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61035 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61036 + reqmode & GR_READ ? " reading" : "",
61037 + reqmode & GR_WRITE ? " writing" : reqmode &
61038 + GR_APPEND ? " appending" : "");
61039 + return reqmode;
61040 + } else
61041 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61042 + {
61043 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61044 + reqmode & GR_READ ? " reading" : "",
61045 + reqmode & GR_WRITE ? " writing" : reqmode &
61046 + GR_APPEND ? " appending" : "");
61047 + return 0;
61048 + } else if (unlikely((mode & reqmode) != reqmode))
61049 + return 0;
61050 +
61051 + return reqmode;
61052 +}
61053 +
61054 +__u32
61055 +gr_acl_handle_creat(const struct dentry * dentry,
61056 + const struct dentry * p_dentry,
61057 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61058 + const int imode)
61059 +{
61060 + __u32 reqmode = GR_WRITE | GR_CREATE;
61061 + __u32 mode;
61062 +
61063 + if (acc_mode & MAY_APPEND)
61064 + reqmode |= GR_APPEND;
61065 + // if a directory was required or the directory already exists, then
61066 + // don't count this open as a read
61067 + if ((acc_mode & MAY_READ) &&
61068 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61069 + reqmode |= GR_READ;
61070 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
61071 + reqmode |= GR_SETID;
61072 +
61073 + mode =
61074 + gr_check_create(dentry, p_dentry, p_mnt,
61075 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61076 +
61077 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61078 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61079 + reqmode & GR_READ ? " reading" : "",
61080 + reqmode & GR_WRITE ? " writing" : reqmode &
61081 + GR_APPEND ? " appending" : "");
61082 + return reqmode;
61083 + } else
61084 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61085 + {
61086 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61087 + reqmode & GR_READ ? " reading" : "",
61088 + reqmode & GR_WRITE ? " writing" : reqmode &
61089 + GR_APPEND ? " appending" : "");
61090 + return 0;
61091 + } else if (unlikely((mode & reqmode) != reqmode))
61092 + return 0;
61093 +
61094 + return reqmode;
61095 +}
61096 +
61097 +__u32
61098 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61099 + const int fmode)
61100 +{
61101 + __u32 mode, reqmode = GR_FIND;
61102 +
61103 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61104 + reqmode |= GR_EXEC;
61105 + if (fmode & S_IWOTH)
61106 + reqmode |= GR_WRITE;
61107 + if (fmode & S_IROTH)
61108 + reqmode |= GR_READ;
61109 +
61110 + mode =
61111 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61112 + mnt);
61113 +
61114 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61115 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61116 + reqmode & GR_READ ? " reading" : "",
61117 + reqmode & GR_WRITE ? " writing" : "",
61118 + reqmode & GR_EXEC ? " executing" : "");
61119 + return reqmode;
61120 + } else
61121 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61122 + {
61123 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61124 + reqmode & GR_READ ? " reading" : "",
61125 + reqmode & GR_WRITE ? " writing" : "",
61126 + reqmode & GR_EXEC ? " executing" : "");
61127 + return 0;
61128 + } else if (unlikely((mode & reqmode) != reqmode))
61129 + return 0;
61130 +
61131 + return reqmode;
61132 +}
61133 +
61134 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61135 +{
61136 + __u32 mode;
61137 +
61138 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61139 +
61140 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61141 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61142 + return mode;
61143 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61144 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61145 + return 0;
61146 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61147 + return 0;
61148 +
61149 + return (reqmode);
61150 +}
61151 +
61152 +__u32
61153 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61154 +{
61155 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61156 +}
61157 +
61158 +__u32
61159 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61160 +{
61161 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61162 +}
61163 +
61164 +__u32
61165 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61166 +{
61167 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61168 +}
61169 +
61170 +__u32
61171 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61172 +{
61173 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61174 +}
61175 +
61176 +__u32
61177 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61178 + umode_t *modeptr)
61179 +{
61180 + mode_t mode;
61181 +
61182 + *modeptr &= ~(mode_t)gr_acl_umask();
61183 + mode = *modeptr;
61184 +
61185 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61186 + return 1;
61187 +
61188 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
61189 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61190 + GR_CHMOD_ACL_MSG);
61191 + } else {
61192 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61193 + }
61194 +}
61195 +
61196 +__u32
61197 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61198 +{
61199 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61200 +}
61201 +
61202 +__u32
61203 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61204 +{
61205 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61206 +}
61207 +
61208 +__u32
61209 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61210 +{
61211 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61212 +}
61213 +
61214 +__u32
61215 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61216 +{
61217 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61218 + GR_UNIXCONNECT_ACL_MSG);
61219 +}
61220 +
61221 +/* hardlinks require at minimum create and link permission,
61222 + any additional privilege required is based on the
61223 + privilege of the file being linked to
61224 +*/
61225 +__u32
61226 +gr_acl_handle_link(const struct dentry * new_dentry,
61227 + const struct dentry * parent_dentry,
61228 + const struct vfsmount * parent_mnt,
61229 + const struct dentry * old_dentry,
61230 + const struct vfsmount * old_mnt, const char *to)
61231 +{
61232 + __u32 mode;
61233 + __u32 needmode = GR_CREATE | GR_LINK;
61234 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61235 +
61236 + mode =
61237 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61238 + old_mnt);
61239 +
61240 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61241 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61242 + return mode;
61243 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61244 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61245 + return 0;
61246 + } else if (unlikely((mode & needmode) != needmode))
61247 + return 0;
61248 +
61249 + return 1;
61250 +}
61251 +
61252 +__u32
61253 +gr_acl_handle_symlink(const struct dentry * new_dentry,
61254 + const struct dentry * parent_dentry,
61255 + const struct vfsmount * parent_mnt, const char *from)
61256 +{
61257 + __u32 needmode = GR_WRITE | GR_CREATE;
61258 + __u32 mode;
61259 +
61260 + mode =
61261 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
61262 + GR_CREATE | GR_AUDIT_CREATE |
61263 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61264 +
61265 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61266 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61267 + return mode;
61268 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61269 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61270 + return 0;
61271 + } else if (unlikely((mode & needmode) != needmode))
61272 + return 0;
61273 +
61274 + return (GR_WRITE | GR_CREATE);
61275 +}
61276 +
61277 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61278 +{
61279 + __u32 mode;
61280 +
61281 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61282 +
61283 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61284 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61285 + return mode;
61286 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61287 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61288 + return 0;
61289 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61290 + return 0;
61291 +
61292 + return (reqmode);
61293 +}
61294 +
61295 +__u32
61296 +gr_acl_handle_mknod(const struct dentry * new_dentry,
61297 + const struct dentry * parent_dentry,
61298 + const struct vfsmount * parent_mnt,
61299 + const int mode)
61300 +{
61301 + __u32 reqmode = GR_WRITE | GR_CREATE;
61302 + if (unlikely(mode & (S_ISUID | S_ISGID)))
61303 + reqmode |= GR_SETID;
61304 +
61305 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61306 + reqmode, GR_MKNOD_ACL_MSG);
61307 +}
61308 +
61309 +__u32
61310 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
61311 + const struct dentry *parent_dentry,
61312 + const struct vfsmount *parent_mnt)
61313 +{
61314 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61315 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61316 +}
61317 +
61318 +#define RENAME_CHECK_SUCCESS(old, new) \
61319 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61320 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61321 +
61322 +int
61323 +gr_acl_handle_rename(struct dentry *new_dentry,
61324 + struct dentry *parent_dentry,
61325 + const struct vfsmount *parent_mnt,
61326 + struct dentry *old_dentry,
61327 + struct inode *old_parent_inode,
61328 + struct vfsmount *old_mnt, const char *newname)
61329 +{
61330 + __u32 comp1, comp2;
61331 + int error = 0;
61332 +
61333 + if (unlikely(!gr_acl_is_enabled()))
61334 + return 0;
61335 +
61336 + if (!new_dentry->d_inode) {
61337 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61338 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61339 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61340 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61341 + GR_DELETE | GR_AUDIT_DELETE |
61342 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61343 + GR_SUPPRESS, old_mnt);
61344 + } else {
61345 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61346 + GR_CREATE | GR_DELETE |
61347 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61348 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61349 + GR_SUPPRESS, parent_mnt);
61350 + comp2 =
61351 + gr_search_file(old_dentry,
61352 + GR_READ | GR_WRITE | GR_AUDIT_READ |
61353 + GR_DELETE | GR_AUDIT_DELETE |
61354 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61355 + }
61356 +
61357 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61358 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61359 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61360 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61361 + && !(comp2 & GR_SUPPRESS)) {
61362 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61363 + error = -EACCES;
61364 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61365 + error = -EACCES;
61366 +
61367 + return error;
61368 +}
61369 +
61370 +void
61371 +gr_acl_handle_exit(void)
61372 +{
61373 + u16 id;
61374 + char *rolename;
61375 + struct file *exec_file;
61376 +
61377 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61378 + !(current->role->roletype & GR_ROLE_PERSIST))) {
61379 + id = current->acl_role_id;
61380 + rolename = current->role->rolename;
61381 + gr_set_acls(1);
61382 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61383 + }
61384 +
61385 + write_lock(&grsec_exec_file_lock);
61386 + exec_file = current->exec_file;
61387 + current->exec_file = NULL;
61388 + write_unlock(&grsec_exec_file_lock);
61389 +
61390 + if (exec_file)
61391 + fput(exec_file);
61392 +}
61393 +
61394 +int
61395 +gr_acl_handle_procpidmem(const struct task_struct *task)
61396 +{
61397 + if (unlikely(!gr_acl_is_enabled()))
61398 + return 0;
61399 +
61400 + if (task != current && task->acl->mode & GR_PROTPROCFD)
61401 + return -EACCES;
61402 +
61403 + return 0;
61404 +}
61405 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61406 new file mode 100644
61407 index 0000000..cd07b96
61408 --- /dev/null
61409 +++ b/grsecurity/gracl_ip.c
61410 @@ -0,0 +1,382 @@
61411 +#include <linux/kernel.h>
61412 +#include <asm/uaccess.h>
61413 +#include <asm/errno.h>
61414 +#include <net/sock.h>
61415 +#include <linux/file.h>
61416 +#include <linux/fs.h>
61417 +#include <linux/net.h>
61418 +#include <linux/in.h>
61419 +#include <linux/skbuff.h>
61420 +#include <linux/ip.h>
61421 +#include <linux/udp.h>
61422 +#include <linux/smp_lock.h>
61423 +#include <linux/types.h>
61424 +#include <linux/sched.h>
61425 +#include <linux/netdevice.h>
61426 +#include <linux/inetdevice.h>
61427 +#include <linux/gracl.h>
61428 +#include <linux/grsecurity.h>
61429 +#include <linux/grinternal.h>
61430 +
61431 +#define GR_BIND 0x01
61432 +#define GR_CONNECT 0x02
61433 +#define GR_INVERT 0x04
61434 +#define GR_BINDOVERRIDE 0x08
61435 +#define GR_CONNECTOVERRIDE 0x10
61436 +#define GR_SOCK_FAMILY 0x20
61437 +
61438 +static const char * gr_protocols[IPPROTO_MAX] = {
61439 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61440 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61441 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61442 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61443 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61444 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61445 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61446 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61447 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61448 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61449 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61450 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61451 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61452 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61453 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61454 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61455 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61456 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61457 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61458 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61459 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61460 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61461 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61462 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61463 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61464 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61465 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61466 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61467 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61468 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61469 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61470 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61471 + };
61472 +
61473 +static const char * gr_socktypes[SOCK_MAX] = {
61474 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61475 + "unknown:7", "unknown:8", "unknown:9", "packet"
61476 + };
61477 +
61478 +static const char * gr_sockfamilies[AF_MAX+1] = {
61479 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61480 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61481 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61482 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61483 + };
61484 +
61485 +const char *
61486 +gr_proto_to_name(unsigned char proto)
61487 +{
61488 + return gr_protocols[proto];
61489 +}
61490 +
61491 +const char *
61492 +gr_socktype_to_name(unsigned char type)
61493 +{
61494 + return gr_socktypes[type];
61495 +}
61496 +
61497 +const char *
61498 +gr_sockfamily_to_name(unsigned char family)
61499 +{
61500 + return gr_sockfamilies[family];
61501 +}
61502 +
61503 +int
61504 +gr_search_socket(const int domain, const int type, const int protocol)
61505 +{
61506 + struct acl_subject_label *curr;
61507 + const struct cred *cred = current_cred();
61508 +
61509 + if (unlikely(!gr_acl_is_enabled()))
61510 + goto exit;
61511 +
61512 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
61513 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61514 + goto exit; // let the kernel handle it
61515 +
61516 + curr = current->acl;
61517 +
61518 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61519 + /* the family is allowed, if this is PF_INET allow it only if
61520 + the extra sock type/protocol checks pass */
61521 + if (domain == PF_INET)
61522 + goto inet_check;
61523 + goto exit;
61524 + } else {
61525 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61526 + __u32 fakeip = 0;
61527 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61528 + current->role->roletype, cred->uid,
61529 + cred->gid, current->exec_file ?
61530 + gr_to_filename(current->exec_file->f_path.dentry,
61531 + current->exec_file->f_path.mnt) :
61532 + curr->filename, curr->filename,
61533 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61534 + &current->signal->saved_ip);
61535 + goto exit;
61536 + }
61537 + goto exit_fail;
61538 + }
61539 +
61540 +inet_check:
61541 + /* the rest of this checking is for IPv4 only */
61542 + if (!curr->ips)
61543 + goto exit;
61544 +
61545 + if ((curr->ip_type & (1 << type)) &&
61546 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61547 + goto exit;
61548 +
61549 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61550 + /* we don't place acls on raw sockets , and sometimes
61551 + dgram/ip sockets are opened for ioctl and not
61552 + bind/connect, so we'll fake a bind learn log */
61553 + if (type == SOCK_RAW || type == SOCK_PACKET) {
61554 + __u32 fakeip = 0;
61555 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61556 + current->role->roletype, cred->uid,
61557 + cred->gid, current->exec_file ?
61558 + gr_to_filename(current->exec_file->f_path.dentry,
61559 + current->exec_file->f_path.mnt) :
61560 + curr->filename, curr->filename,
61561 + &fakeip, 0, type,
61562 + protocol, GR_CONNECT, &current->signal->saved_ip);
61563 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61564 + __u32 fakeip = 0;
61565 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61566 + current->role->roletype, cred->uid,
61567 + cred->gid, current->exec_file ?
61568 + gr_to_filename(current->exec_file->f_path.dentry,
61569 + current->exec_file->f_path.mnt) :
61570 + curr->filename, curr->filename,
61571 + &fakeip, 0, type,
61572 + protocol, GR_BIND, &current->signal->saved_ip);
61573 + }
61574 + /* we'll log when they use connect or bind */
61575 + goto exit;
61576 + }
61577 +
61578 +exit_fail:
61579 + if (domain == PF_INET)
61580 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61581 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
61582 + else
61583 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61584 + gr_socktype_to_name(type), protocol);
61585 +
61586 + return 0;
61587 +exit:
61588 + return 1;
61589 +}
61590 +
61591 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61592 +{
61593 + if ((ip->mode & mode) &&
61594 + (ip_port >= ip->low) &&
61595 + (ip_port <= ip->high) &&
61596 + ((ntohl(ip_addr) & our_netmask) ==
61597 + (ntohl(our_addr) & our_netmask))
61598 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61599 + && (ip->type & (1 << type))) {
61600 + if (ip->mode & GR_INVERT)
61601 + return 2; // specifically denied
61602 + else
61603 + return 1; // allowed
61604 + }
61605 +
61606 + return 0; // not specifically allowed, may continue parsing
61607 +}
61608 +
61609 +static int
61610 +gr_search_connectbind(const int full_mode, struct sock *sk,
61611 + struct sockaddr_in *addr, const int type)
61612 +{
61613 + char iface[IFNAMSIZ] = {0};
61614 + struct acl_subject_label *curr;
61615 + struct acl_ip_label *ip;
61616 + struct inet_sock *isk;
61617 + struct net_device *dev;
61618 + struct in_device *idev;
61619 + unsigned long i;
61620 + int ret;
61621 + int mode = full_mode & (GR_BIND | GR_CONNECT);
61622 + __u32 ip_addr = 0;
61623 + __u32 our_addr;
61624 + __u32 our_netmask;
61625 + char *p;
61626 + __u16 ip_port = 0;
61627 + const struct cred *cred = current_cred();
61628 +
61629 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61630 + return 0;
61631 +
61632 + curr = current->acl;
61633 + isk = inet_sk(sk);
61634 +
61635 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61636 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61637 + addr->sin_addr.s_addr = curr->inaddr_any_override;
61638 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61639 + struct sockaddr_in saddr;
61640 + int err;
61641 +
61642 + saddr.sin_family = AF_INET;
61643 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
61644 + saddr.sin_port = isk->sport;
61645 +
61646 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61647 + if (err)
61648 + return err;
61649 +
61650 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61651 + if (err)
61652 + return err;
61653 + }
61654 +
61655 + if (!curr->ips)
61656 + return 0;
61657 +
61658 + ip_addr = addr->sin_addr.s_addr;
61659 + ip_port = ntohs(addr->sin_port);
61660 +
61661 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61662 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61663 + current->role->roletype, cred->uid,
61664 + cred->gid, current->exec_file ?
61665 + gr_to_filename(current->exec_file->f_path.dentry,
61666 + current->exec_file->f_path.mnt) :
61667 + curr->filename, curr->filename,
61668 + &ip_addr, ip_port, type,
61669 + sk->sk_protocol, mode, &current->signal->saved_ip);
61670 + return 0;
61671 + }
61672 +
61673 + for (i = 0; i < curr->ip_num; i++) {
61674 + ip = *(curr->ips + i);
61675 + if (ip->iface != NULL) {
61676 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
61677 + p = strchr(iface, ':');
61678 + if (p != NULL)
61679 + *p = '\0';
61680 + dev = dev_get_by_name(sock_net(sk), iface);
61681 + if (dev == NULL)
61682 + continue;
61683 + idev = in_dev_get(dev);
61684 + if (idev == NULL) {
61685 + dev_put(dev);
61686 + continue;
61687 + }
61688 + rcu_read_lock();
61689 + for_ifa(idev) {
61690 + if (!strcmp(ip->iface, ifa->ifa_label)) {
61691 + our_addr = ifa->ifa_address;
61692 + our_netmask = 0xffffffff;
61693 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61694 + if (ret == 1) {
61695 + rcu_read_unlock();
61696 + in_dev_put(idev);
61697 + dev_put(dev);
61698 + return 0;
61699 + } else if (ret == 2) {
61700 + rcu_read_unlock();
61701 + in_dev_put(idev);
61702 + dev_put(dev);
61703 + goto denied;
61704 + }
61705 + }
61706 + } endfor_ifa(idev);
61707 + rcu_read_unlock();
61708 + in_dev_put(idev);
61709 + dev_put(dev);
61710 + } else {
61711 + our_addr = ip->addr;
61712 + our_netmask = ip->netmask;
61713 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61714 + if (ret == 1)
61715 + return 0;
61716 + else if (ret == 2)
61717 + goto denied;
61718 + }
61719 + }
61720 +
61721 +denied:
61722 + if (mode == GR_BIND)
61723 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61724 + else if (mode == GR_CONNECT)
61725 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61726 +
61727 + return -EACCES;
61728 +}
61729 +
61730 +int
61731 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61732 +{
61733 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61734 +}
61735 +
61736 +int
61737 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61738 +{
61739 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61740 +}
61741 +
61742 +int gr_search_listen(struct socket *sock)
61743 +{
61744 + struct sock *sk = sock->sk;
61745 + struct sockaddr_in addr;
61746 +
61747 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61748 + addr.sin_port = inet_sk(sk)->sport;
61749 +
61750 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61751 +}
61752 +
61753 +int gr_search_accept(struct socket *sock)
61754 +{
61755 + struct sock *sk = sock->sk;
61756 + struct sockaddr_in addr;
61757 +
61758 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61759 + addr.sin_port = inet_sk(sk)->sport;
61760 +
61761 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61762 +}
61763 +
61764 +int
61765 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61766 +{
61767 + if (addr)
61768 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61769 + else {
61770 + struct sockaddr_in sin;
61771 + const struct inet_sock *inet = inet_sk(sk);
61772 +
61773 + sin.sin_addr.s_addr = inet->daddr;
61774 + sin.sin_port = inet->dport;
61775 +
61776 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61777 + }
61778 +}
61779 +
61780 +int
61781 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61782 +{
61783 + struct sockaddr_in sin;
61784 +
61785 + if (unlikely(skb->len < sizeof (struct udphdr)))
61786 + return 0; // skip this packet
61787 +
61788 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61789 + sin.sin_port = udp_hdr(skb)->source;
61790 +
61791 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61792 +}
61793 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61794 new file mode 100644
61795 index 0000000..34bdd46
61796 --- /dev/null
61797 +++ b/grsecurity/gracl_learn.c
61798 @@ -0,0 +1,208 @@
61799 +#include <linux/kernel.h>
61800 +#include <linux/mm.h>
61801 +#include <linux/sched.h>
61802 +#include <linux/poll.h>
61803 +#include <linux/smp_lock.h>
61804 +#include <linux/string.h>
61805 +#include <linux/file.h>
61806 +#include <linux/types.h>
61807 +#include <linux/vmalloc.h>
61808 +#include <linux/grinternal.h>
61809 +
61810 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61811 + size_t count, loff_t *ppos);
61812 +extern int gr_acl_is_enabled(void);
61813 +
61814 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61815 +static int gr_learn_attached;
61816 +
61817 +/* use a 512k buffer */
61818 +#define LEARN_BUFFER_SIZE (512 * 1024)
61819 +
61820 +static DEFINE_SPINLOCK(gr_learn_lock);
61821 +static DEFINE_MUTEX(gr_learn_user_mutex);
61822 +
61823 +/* we need to maintain two buffers, so that the kernel context of grlearn
61824 + uses a semaphore around the userspace copying, and the other kernel contexts
61825 + use a spinlock when copying into the buffer, since they cannot sleep
61826 +*/
61827 +static char *learn_buffer;
61828 +static char *learn_buffer_user;
61829 +static int learn_buffer_len;
61830 +static int learn_buffer_user_len;
61831 +
61832 +static ssize_t
61833 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61834 +{
61835 + DECLARE_WAITQUEUE(wait, current);
61836 + ssize_t retval = 0;
61837 +
61838 + add_wait_queue(&learn_wait, &wait);
61839 + set_current_state(TASK_INTERRUPTIBLE);
61840 + do {
61841 + mutex_lock(&gr_learn_user_mutex);
61842 + spin_lock(&gr_learn_lock);
61843 + if (learn_buffer_len)
61844 + break;
61845 + spin_unlock(&gr_learn_lock);
61846 + mutex_unlock(&gr_learn_user_mutex);
61847 + if (file->f_flags & O_NONBLOCK) {
61848 + retval = -EAGAIN;
61849 + goto out;
61850 + }
61851 + if (signal_pending(current)) {
61852 + retval = -ERESTARTSYS;
61853 + goto out;
61854 + }
61855 +
61856 + schedule();
61857 + } while (1);
61858 +
61859 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61860 + learn_buffer_user_len = learn_buffer_len;
61861 + retval = learn_buffer_len;
61862 + learn_buffer_len = 0;
61863 +
61864 + spin_unlock(&gr_learn_lock);
61865 +
61866 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61867 + retval = -EFAULT;
61868 +
61869 + mutex_unlock(&gr_learn_user_mutex);
61870 +out:
61871 + set_current_state(TASK_RUNNING);
61872 + remove_wait_queue(&learn_wait, &wait);
61873 + return retval;
61874 +}
61875 +
61876 +static unsigned int
61877 +poll_learn(struct file * file, poll_table * wait)
61878 +{
61879 + poll_wait(file, &learn_wait, wait);
61880 +
61881 + if (learn_buffer_len)
61882 + return (POLLIN | POLLRDNORM);
61883 +
61884 + return 0;
61885 +}
61886 +
61887 +void
61888 +gr_clear_learn_entries(void)
61889 +{
61890 + char *tmp;
61891 +
61892 + mutex_lock(&gr_learn_user_mutex);
61893 + spin_lock(&gr_learn_lock);
61894 + tmp = learn_buffer;
61895 + learn_buffer = NULL;
61896 + spin_unlock(&gr_learn_lock);
61897 + if (tmp)
61898 + vfree(tmp);
61899 + if (learn_buffer_user != NULL) {
61900 + vfree(learn_buffer_user);
61901 + learn_buffer_user = NULL;
61902 + }
61903 + learn_buffer_len = 0;
61904 + mutex_unlock(&gr_learn_user_mutex);
61905 +
61906 + return;
61907 +}
61908 +
61909 +void
61910 +gr_add_learn_entry(const char *fmt, ...)
61911 +{
61912 + va_list args;
61913 + unsigned int len;
61914 +
61915 + if (!gr_learn_attached)
61916 + return;
61917 +
61918 + spin_lock(&gr_learn_lock);
61919 +
61920 + /* leave a gap at the end so we know when it's "full" but don't have to
61921 + compute the exact length of the string we're trying to append
61922 + */
61923 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61924 + spin_unlock(&gr_learn_lock);
61925 + wake_up_interruptible(&learn_wait);
61926 + return;
61927 + }
61928 + if (learn_buffer == NULL) {
61929 + spin_unlock(&gr_learn_lock);
61930 + return;
61931 + }
61932 +
61933 + va_start(args, fmt);
61934 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61935 + va_end(args);
61936 +
61937 + learn_buffer_len += len + 1;
61938 +
61939 + spin_unlock(&gr_learn_lock);
61940 + wake_up_interruptible(&learn_wait);
61941 +
61942 + return;
61943 +}
61944 +
61945 +static int
61946 +open_learn(struct inode *inode, struct file *file)
61947 +{
61948 + if (file->f_mode & FMODE_READ && gr_learn_attached)
61949 + return -EBUSY;
61950 + if (file->f_mode & FMODE_READ) {
61951 + int retval = 0;
61952 + mutex_lock(&gr_learn_user_mutex);
61953 + if (learn_buffer == NULL)
61954 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61955 + if (learn_buffer_user == NULL)
61956 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61957 + if (learn_buffer == NULL) {
61958 + retval = -ENOMEM;
61959 + goto out_error;
61960 + }
61961 + if (learn_buffer_user == NULL) {
61962 + retval = -ENOMEM;
61963 + goto out_error;
61964 + }
61965 + learn_buffer_len = 0;
61966 + learn_buffer_user_len = 0;
61967 + gr_learn_attached = 1;
61968 +out_error:
61969 + mutex_unlock(&gr_learn_user_mutex);
61970 + return retval;
61971 + }
61972 + return 0;
61973 +}
61974 +
61975 +static int
61976 +close_learn(struct inode *inode, struct file *file)
61977 +{
61978 + if (file->f_mode & FMODE_READ) {
61979 + char *tmp = NULL;
61980 + mutex_lock(&gr_learn_user_mutex);
61981 + spin_lock(&gr_learn_lock);
61982 + tmp = learn_buffer;
61983 + learn_buffer = NULL;
61984 + spin_unlock(&gr_learn_lock);
61985 + if (tmp)
61986 + vfree(tmp);
61987 + if (learn_buffer_user != NULL) {
61988 + vfree(learn_buffer_user);
61989 + learn_buffer_user = NULL;
61990 + }
61991 + learn_buffer_len = 0;
61992 + learn_buffer_user_len = 0;
61993 + gr_learn_attached = 0;
61994 + mutex_unlock(&gr_learn_user_mutex);
61995 + }
61996 +
61997 + return 0;
61998 +}
61999 +
62000 +const struct file_operations grsec_fops = {
62001 + .read = read_learn,
62002 + .write = write_grsec_handler,
62003 + .open = open_learn,
62004 + .release = close_learn,
62005 + .poll = poll_learn,
62006 +};
62007 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62008 new file mode 100644
62009 index 0000000..70b2179
62010 --- /dev/null
62011 +++ b/grsecurity/gracl_res.c
62012 @@ -0,0 +1,67 @@
62013 +#include <linux/kernel.h>
62014 +#include <linux/sched.h>
62015 +#include <linux/gracl.h>
62016 +#include <linux/grinternal.h>
62017 +
62018 +static const char *restab_log[] = {
62019 + [RLIMIT_CPU] = "RLIMIT_CPU",
62020 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62021 + [RLIMIT_DATA] = "RLIMIT_DATA",
62022 + [RLIMIT_STACK] = "RLIMIT_STACK",
62023 + [RLIMIT_CORE] = "RLIMIT_CORE",
62024 + [RLIMIT_RSS] = "RLIMIT_RSS",
62025 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
62026 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62027 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62028 + [RLIMIT_AS] = "RLIMIT_AS",
62029 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62030 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62031 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62032 + [RLIMIT_NICE] = "RLIMIT_NICE",
62033 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62034 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62035 + [GR_CRASH_RES] = "RLIMIT_CRASH"
62036 +};
62037 +
62038 +void
62039 +gr_log_resource(const struct task_struct *task,
62040 + const int res, const unsigned long wanted, const int gt)
62041 +{
62042 + const struct cred *cred;
62043 + unsigned long rlim;
62044 +
62045 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
62046 + return;
62047 +
62048 + // not yet supported resource
62049 + if (unlikely(!restab_log[res]))
62050 + return;
62051 +
62052 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62053 + rlim = task->signal->rlim[res].rlim_max;
62054 + else
62055 + rlim = task->signal->rlim[res].rlim_cur;
62056 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62057 + return;
62058 +
62059 + rcu_read_lock();
62060 + cred = __task_cred(task);
62061 +
62062 + if (res == RLIMIT_NPROC &&
62063 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62064 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62065 + goto out_rcu_unlock;
62066 + else if (res == RLIMIT_MEMLOCK &&
62067 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62068 + goto out_rcu_unlock;
62069 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62070 + goto out_rcu_unlock;
62071 + rcu_read_unlock();
62072 +
62073 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62074 +
62075 + return;
62076 +out_rcu_unlock:
62077 + rcu_read_unlock();
62078 + return;
62079 +}
62080 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62081 new file mode 100644
62082 index 0000000..1d1b734
62083 --- /dev/null
62084 +++ b/grsecurity/gracl_segv.c
62085 @@ -0,0 +1,284 @@
62086 +#include <linux/kernel.h>
62087 +#include <linux/mm.h>
62088 +#include <asm/uaccess.h>
62089 +#include <asm/errno.h>
62090 +#include <asm/mman.h>
62091 +#include <net/sock.h>
62092 +#include <linux/file.h>
62093 +#include <linux/fs.h>
62094 +#include <linux/net.h>
62095 +#include <linux/in.h>
62096 +#include <linux/smp_lock.h>
62097 +#include <linux/slab.h>
62098 +#include <linux/types.h>
62099 +#include <linux/sched.h>
62100 +#include <linux/timer.h>
62101 +#include <linux/gracl.h>
62102 +#include <linux/grsecurity.h>
62103 +#include <linux/grinternal.h>
62104 +
62105 +static struct crash_uid *uid_set;
62106 +static unsigned short uid_used;
62107 +static DEFINE_SPINLOCK(gr_uid_lock);
62108 +extern rwlock_t gr_inode_lock;
62109 +extern struct acl_subject_label *
62110 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62111 + struct acl_role_label *role);
62112 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
62113 +
62114 +int
62115 +gr_init_uidset(void)
62116 +{
62117 + uid_set =
62118 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62119 + uid_used = 0;
62120 +
62121 + return uid_set ? 1 : 0;
62122 +}
62123 +
62124 +void
62125 +gr_free_uidset(void)
62126 +{
62127 + if (uid_set)
62128 + kfree(uid_set);
62129 +
62130 + return;
62131 +}
62132 +
62133 +int
62134 +gr_find_uid(const uid_t uid)
62135 +{
62136 + struct crash_uid *tmp = uid_set;
62137 + uid_t buid;
62138 + int low = 0, high = uid_used - 1, mid;
62139 +
62140 + while (high >= low) {
62141 + mid = (low + high) >> 1;
62142 + buid = tmp[mid].uid;
62143 + if (buid == uid)
62144 + return mid;
62145 + if (buid > uid)
62146 + high = mid - 1;
62147 + if (buid < uid)
62148 + low = mid + 1;
62149 + }
62150 +
62151 + return -1;
62152 +}
62153 +
62154 +static __inline__ void
62155 +gr_insertsort(void)
62156 +{
62157 + unsigned short i, j;
62158 + struct crash_uid index;
62159 +
62160 + for (i = 1; i < uid_used; i++) {
62161 + index = uid_set[i];
62162 + j = i;
62163 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62164 + uid_set[j] = uid_set[j - 1];
62165 + j--;
62166 + }
62167 + uid_set[j] = index;
62168 + }
62169 +
62170 + return;
62171 +}
62172 +
62173 +static __inline__ void
62174 +gr_insert_uid(const uid_t uid, const unsigned long expires)
62175 +{
62176 + int loc;
62177 +
62178 + if (uid_used == GR_UIDTABLE_MAX)
62179 + return;
62180 +
62181 + loc = gr_find_uid(uid);
62182 +
62183 + if (loc >= 0) {
62184 + uid_set[loc].expires = expires;
62185 + return;
62186 + }
62187 +
62188 + uid_set[uid_used].uid = uid;
62189 + uid_set[uid_used].expires = expires;
62190 + uid_used++;
62191 +
62192 + gr_insertsort();
62193 +
62194 + return;
62195 +}
62196 +
62197 +void
62198 +gr_remove_uid(const unsigned short loc)
62199 +{
62200 + unsigned short i;
62201 +
62202 + for (i = loc + 1; i < uid_used; i++)
62203 + uid_set[i - 1] = uid_set[i];
62204 +
62205 + uid_used--;
62206 +
62207 + return;
62208 +}
62209 +
62210 +int
62211 +gr_check_crash_uid(const uid_t uid)
62212 +{
62213 + int loc;
62214 + int ret = 0;
62215 +
62216 + if (unlikely(!gr_acl_is_enabled()))
62217 + return 0;
62218 +
62219 + spin_lock(&gr_uid_lock);
62220 + loc = gr_find_uid(uid);
62221 +
62222 + if (loc < 0)
62223 + goto out_unlock;
62224 +
62225 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
62226 + gr_remove_uid(loc);
62227 + else
62228 + ret = 1;
62229 +
62230 +out_unlock:
62231 + spin_unlock(&gr_uid_lock);
62232 + return ret;
62233 +}
62234 +
62235 +static __inline__ int
62236 +proc_is_setxid(const struct cred *cred)
62237 +{
62238 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
62239 + cred->uid != cred->fsuid)
62240 + return 1;
62241 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62242 + cred->gid != cred->fsgid)
62243 + return 1;
62244 +
62245 + return 0;
62246 +}
62247 +
62248 +void
62249 +gr_handle_crash(struct task_struct *task, const int sig)
62250 +{
62251 + struct acl_subject_label *curr;
62252 + struct task_struct *tsk, *tsk2;
62253 + const struct cred *cred;
62254 + const struct cred *cred2;
62255 +
62256 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62257 + return;
62258 +
62259 + if (unlikely(!gr_acl_is_enabled()))
62260 + return;
62261 +
62262 + curr = task->acl;
62263 +
62264 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
62265 + return;
62266 +
62267 + if (time_before_eq(curr->expires, get_seconds())) {
62268 + curr->expires = 0;
62269 + curr->crashes = 0;
62270 + }
62271 +
62272 + curr->crashes++;
62273 +
62274 + if (!curr->expires)
62275 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62276 +
62277 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62278 + time_after(curr->expires, get_seconds())) {
62279 + rcu_read_lock();
62280 + cred = __task_cred(task);
62281 + if (cred->uid && proc_is_setxid(cred)) {
62282 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62283 + spin_lock(&gr_uid_lock);
62284 + gr_insert_uid(cred->uid, curr->expires);
62285 + spin_unlock(&gr_uid_lock);
62286 + curr->expires = 0;
62287 + curr->crashes = 0;
62288 + read_lock(&tasklist_lock);
62289 + do_each_thread(tsk2, tsk) {
62290 + cred2 = __task_cred(tsk);
62291 + if (tsk != task && cred2->uid == cred->uid)
62292 + gr_fake_force_sig(SIGKILL, tsk);
62293 + } while_each_thread(tsk2, tsk);
62294 + read_unlock(&tasklist_lock);
62295 + } else {
62296 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62297 + read_lock(&tasklist_lock);
62298 + read_lock(&grsec_exec_file_lock);
62299 + do_each_thread(tsk2, tsk) {
62300 + if (likely(tsk != task)) {
62301 + // if this thread has the same subject as the one that triggered
62302 + // RES_CRASH and it's the same binary, kill it
62303 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62304 + gr_fake_force_sig(SIGKILL, tsk);
62305 + }
62306 + } while_each_thread(tsk2, tsk);
62307 + read_unlock(&grsec_exec_file_lock);
62308 + read_unlock(&tasklist_lock);
62309 + }
62310 + rcu_read_unlock();
62311 + }
62312 +
62313 + return;
62314 +}
62315 +
62316 +int
62317 +gr_check_crash_exec(const struct file *filp)
62318 +{
62319 + struct acl_subject_label *curr;
62320 +
62321 + if (unlikely(!gr_acl_is_enabled()))
62322 + return 0;
62323 +
62324 + read_lock(&gr_inode_lock);
62325 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62326 + filp->f_path.dentry->d_inode->i_sb->s_dev,
62327 + current->role);
62328 + read_unlock(&gr_inode_lock);
62329 +
62330 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62331 + (!curr->crashes && !curr->expires))
62332 + return 0;
62333 +
62334 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62335 + time_after(curr->expires, get_seconds()))
62336 + return 1;
62337 + else if (time_before_eq(curr->expires, get_seconds())) {
62338 + curr->crashes = 0;
62339 + curr->expires = 0;
62340 + }
62341 +
62342 + return 0;
62343 +}
62344 +
62345 +void
62346 +gr_handle_alertkill(struct task_struct *task)
62347 +{
62348 + struct acl_subject_label *curracl;
62349 + __u32 curr_ip;
62350 + struct task_struct *p, *p2;
62351 +
62352 + if (unlikely(!gr_acl_is_enabled()))
62353 + return;
62354 +
62355 + curracl = task->acl;
62356 + curr_ip = task->signal->curr_ip;
62357 +
62358 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62359 + read_lock(&tasklist_lock);
62360 + do_each_thread(p2, p) {
62361 + if (p->signal->curr_ip == curr_ip)
62362 + gr_fake_force_sig(SIGKILL, p);
62363 + } while_each_thread(p2, p);
62364 + read_unlock(&tasklist_lock);
62365 + } else if (curracl->mode & GR_KILLPROC)
62366 + gr_fake_force_sig(SIGKILL, task);
62367 +
62368 + return;
62369 +}
62370 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62371 new file mode 100644
62372 index 0000000..9d83a69
62373 --- /dev/null
62374 +++ b/grsecurity/gracl_shm.c
62375 @@ -0,0 +1,40 @@
62376 +#include <linux/kernel.h>
62377 +#include <linux/mm.h>
62378 +#include <linux/sched.h>
62379 +#include <linux/file.h>
62380 +#include <linux/ipc.h>
62381 +#include <linux/gracl.h>
62382 +#include <linux/grsecurity.h>
62383 +#include <linux/grinternal.h>
62384 +
62385 +int
62386 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62387 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62388 +{
62389 + struct task_struct *task;
62390 +
62391 + if (!gr_acl_is_enabled())
62392 + return 1;
62393 +
62394 + rcu_read_lock();
62395 + read_lock(&tasklist_lock);
62396 +
62397 + task = find_task_by_vpid(shm_cprid);
62398 +
62399 + if (unlikely(!task))
62400 + task = find_task_by_vpid(shm_lapid);
62401 +
62402 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62403 + (task->pid == shm_lapid)) &&
62404 + (task->acl->mode & GR_PROTSHM) &&
62405 + (task->acl != current->acl))) {
62406 + read_unlock(&tasklist_lock);
62407 + rcu_read_unlock();
62408 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62409 + return 0;
62410 + }
62411 + read_unlock(&tasklist_lock);
62412 + rcu_read_unlock();
62413 +
62414 + return 1;
62415 +}
62416 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62417 new file mode 100644
62418 index 0000000..bc0be01
62419 --- /dev/null
62420 +++ b/grsecurity/grsec_chdir.c
62421 @@ -0,0 +1,19 @@
62422 +#include <linux/kernel.h>
62423 +#include <linux/sched.h>
62424 +#include <linux/fs.h>
62425 +#include <linux/file.h>
62426 +#include <linux/grsecurity.h>
62427 +#include <linux/grinternal.h>
62428 +
62429 +void
62430 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62431 +{
62432 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62433 + if ((grsec_enable_chdir && grsec_enable_group &&
62434 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62435 + !grsec_enable_group)) {
62436 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62437 + }
62438 +#endif
62439 + return;
62440 +}
62441 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62442 new file mode 100644
62443 index 0000000..197bdd5
62444 --- /dev/null
62445 +++ b/grsecurity/grsec_chroot.c
62446 @@ -0,0 +1,386 @@
62447 +#include <linux/kernel.h>
62448 +#include <linux/module.h>
62449 +#include <linux/sched.h>
62450 +#include <linux/file.h>
62451 +#include <linux/fs.h>
62452 +#include <linux/mount.h>
62453 +#include <linux/types.h>
62454 +#include <linux/pid_namespace.h>
62455 +#include <linux/grsecurity.h>
62456 +#include <linux/grinternal.h>
62457 +
62458 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62459 +{
62460 +#ifdef CONFIG_GRKERNSEC
62461 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62462 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62463 + task->gr_is_chrooted = 1;
62464 + else
62465 + task->gr_is_chrooted = 0;
62466 +
62467 + task->gr_chroot_dentry = path->dentry;
62468 +#endif
62469 + return;
62470 +}
62471 +
62472 +void gr_clear_chroot_entries(struct task_struct *task)
62473 +{
62474 +#ifdef CONFIG_GRKERNSEC
62475 + task->gr_is_chrooted = 0;
62476 + task->gr_chroot_dentry = NULL;
62477 +#endif
62478 + return;
62479 +}
62480 +
62481 +int
62482 +gr_handle_chroot_unix(const pid_t pid)
62483 +{
62484 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62485 + struct task_struct *p;
62486 +
62487 + if (unlikely(!grsec_enable_chroot_unix))
62488 + return 1;
62489 +
62490 + if (likely(!proc_is_chrooted(current)))
62491 + return 1;
62492 +
62493 + rcu_read_lock();
62494 + read_lock(&tasklist_lock);
62495 +
62496 + p = find_task_by_vpid_unrestricted(pid);
62497 + if (unlikely(p && !have_same_root(current, p))) {
62498 + read_unlock(&tasklist_lock);
62499 + rcu_read_unlock();
62500 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62501 + return 0;
62502 + }
62503 + read_unlock(&tasklist_lock);
62504 + rcu_read_unlock();
62505 +#endif
62506 + return 1;
62507 +}
62508 +
62509 +int
62510 +gr_handle_chroot_nice(void)
62511 +{
62512 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62513 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62514 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62515 + return -EPERM;
62516 + }
62517 +#endif
62518 + return 0;
62519 +}
62520 +
62521 +int
62522 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62523 +{
62524 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62525 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62526 + && proc_is_chrooted(current)) {
62527 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62528 + return -EACCES;
62529 + }
62530 +#endif
62531 + return 0;
62532 +}
62533 +
62534 +int
62535 +gr_handle_chroot_rawio(const struct inode *inode)
62536 +{
62537 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62538 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62539 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62540 + return 1;
62541 +#endif
62542 + return 0;
62543 +}
62544 +
62545 +int
62546 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62547 +{
62548 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62549 + struct task_struct *p;
62550 + int ret = 0;
62551 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62552 + return ret;
62553 +
62554 + read_lock(&tasklist_lock);
62555 + do_each_pid_task(pid, type, p) {
62556 + if (!have_same_root(current, p)) {
62557 + ret = 1;
62558 + goto out;
62559 + }
62560 + } while_each_pid_task(pid, type, p);
62561 +out:
62562 + read_unlock(&tasklist_lock);
62563 + return ret;
62564 +#endif
62565 + return 0;
62566 +}
62567 +
62568 +int
62569 +gr_pid_is_chrooted(struct task_struct *p)
62570 +{
62571 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62572 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62573 + return 0;
62574 +
62575 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62576 + !have_same_root(current, p)) {
62577 + return 1;
62578 + }
62579 +#endif
62580 + return 0;
62581 +}
62582 +
62583 +EXPORT_SYMBOL(gr_pid_is_chrooted);
62584 +
62585 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62586 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62587 +{
62588 + struct dentry *dentry = (struct dentry *)u_dentry;
62589 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62590 + struct dentry *realroot;
62591 + struct vfsmount *realrootmnt;
62592 + struct dentry *currentroot;
62593 + struct vfsmount *currentmnt;
62594 + struct task_struct *reaper = &init_task;
62595 + int ret = 1;
62596 +
62597 + read_lock(&reaper->fs->lock);
62598 + realrootmnt = mntget(reaper->fs->root.mnt);
62599 + realroot = dget(reaper->fs->root.dentry);
62600 + read_unlock(&reaper->fs->lock);
62601 +
62602 + read_lock(&current->fs->lock);
62603 + currentmnt = mntget(current->fs->root.mnt);
62604 + currentroot = dget(current->fs->root.dentry);
62605 + read_unlock(&current->fs->lock);
62606 +
62607 + spin_lock(&dcache_lock);
62608 + for (;;) {
62609 + if (unlikely((dentry == realroot && mnt == realrootmnt)
62610 + || (dentry == currentroot && mnt == currentmnt)))
62611 + break;
62612 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62613 + if (mnt->mnt_parent == mnt)
62614 + break;
62615 + dentry = mnt->mnt_mountpoint;
62616 + mnt = mnt->mnt_parent;
62617 + continue;
62618 + }
62619 + dentry = dentry->d_parent;
62620 + }
62621 + spin_unlock(&dcache_lock);
62622 +
62623 + dput(currentroot);
62624 + mntput(currentmnt);
62625 +
62626 + /* access is outside of chroot */
62627 + if (dentry == realroot && mnt == realrootmnt)
62628 + ret = 0;
62629 +
62630 + dput(realroot);
62631 + mntput(realrootmnt);
62632 + return ret;
62633 +}
62634 +#endif
62635 +
62636 +int
62637 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62638 +{
62639 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62640 + if (!grsec_enable_chroot_fchdir)
62641 + return 1;
62642 +
62643 + if (!proc_is_chrooted(current))
62644 + return 1;
62645 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62646 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62647 + return 0;
62648 + }
62649 +#endif
62650 + return 1;
62651 +}
62652 +
62653 +int
62654 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62655 + const time_t shm_createtime)
62656 +{
62657 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62658 + struct task_struct *p;
62659 + time_t starttime;
62660 +
62661 + if (unlikely(!grsec_enable_chroot_shmat))
62662 + return 1;
62663 +
62664 + if (likely(!proc_is_chrooted(current)))
62665 + return 1;
62666 +
62667 + rcu_read_lock();
62668 + read_lock(&tasklist_lock);
62669 +
62670 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62671 + starttime = p->start_time.tv_sec;
62672 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62673 + if (have_same_root(current, p)) {
62674 + goto allow;
62675 + } else {
62676 + read_unlock(&tasklist_lock);
62677 + rcu_read_unlock();
62678 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62679 + return 0;
62680 + }
62681 + }
62682 + /* creator exited, pid reuse, fall through to next check */
62683 + }
62684 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62685 + if (unlikely(!have_same_root(current, p))) {
62686 + read_unlock(&tasklist_lock);
62687 + rcu_read_unlock();
62688 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62689 + return 0;
62690 + }
62691 + }
62692 +
62693 +allow:
62694 + read_unlock(&tasklist_lock);
62695 + rcu_read_unlock();
62696 +#endif
62697 + return 1;
62698 +}
62699 +
62700 +void
62701 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62702 +{
62703 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62704 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62705 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62706 +#endif
62707 + return;
62708 +}
62709 +
62710 +int
62711 +gr_handle_chroot_mknod(const struct dentry *dentry,
62712 + const struct vfsmount *mnt, const int mode)
62713 +{
62714 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62715 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62716 + proc_is_chrooted(current)) {
62717 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62718 + return -EPERM;
62719 + }
62720 +#endif
62721 + return 0;
62722 +}
62723 +
62724 +int
62725 +gr_handle_chroot_mount(const struct dentry *dentry,
62726 + const struct vfsmount *mnt, const char *dev_name)
62727 +{
62728 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62729 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62730 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62731 + return -EPERM;
62732 + }
62733 +#endif
62734 + return 0;
62735 +}
62736 +
62737 +int
62738 +gr_handle_chroot_pivot(void)
62739 +{
62740 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62741 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62742 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62743 + return -EPERM;
62744 + }
62745 +#endif
62746 + return 0;
62747 +}
62748 +
62749 +int
62750 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62751 +{
62752 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62753 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62754 + !gr_is_outside_chroot(dentry, mnt)) {
62755 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62756 + return -EPERM;
62757 + }
62758 +#endif
62759 + return 0;
62760 +}
62761 +
62762 +extern const char *captab_log[];
62763 +extern int captab_log_entries;
62764 +
62765 +int
62766 +gr_chroot_is_capable(const int cap)
62767 +{
62768 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62769 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62770 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62771 + if (cap_raised(chroot_caps, cap)) {
62772 + const struct cred *creds = current_cred();
62773 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62774 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62775 + }
62776 + return 0;
62777 + }
62778 + }
62779 +#endif
62780 + return 1;
62781 +}
62782 +
62783 +int
62784 +gr_chroot_is_capable_nolog(const int cap)
62785 +{
62786 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62787 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62788 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62789 + if (cap_raised(chroot_caps, cap)) {
62790 + return 0;
62791 + }
62792 + }
62793 +#endif
62794 + return 1;
62795 +}
62796 +
62797 +int
62798 +gr_handle_chroot_sysctl(const int op)
62799 +{
62800 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62801 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62802 + && (op & MAY_WRITE))
62803 + return -EACCES;
62804 +#endif
62805 + return 0;
62806 +}
62807 +
62808 +void
62809 +gr_handle_chroot_chdir(struct path *path)
62810 +{
62811 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62812 + if (grsec_enable_chroot_chdir)
62813 + set_fs_pwd(current->fs, path);
62814 +#endif
62815 + return;
62816 +}
62817 +
62818 +int
62819 +gr_handle_chroot_chmod(const struct dentry *dentry,
62820 + const struct vfsmount *mnt, const int mode)
62821 +{
62822 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62823 + /* allow chmod +s on directories, but not on files */
62824 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62825 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62826 + proc_is_chrooted(current)) {
62827 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62828 + return -EPERM;
62829 + }
62830 +#endif
62831 + return 0;
62832 +}
62833 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62834 new file mode 100644
62835 index 0000000..40545bf
62836 --- /dev/null
62837 +++ b/grsecurity/grsec_disabled.c
62838 @@ -0,0 +1,437 @@
62839 +#include <linux/kernel.h>
62840 +#include <linux/module.h>
62841 +#include <linux/sched.h>
62842 +#include <linux/file.h>
62843 +#include <linux/fs.h>
62844 +#include <linux/kdev_t.h>
62845 +#include <linux/net.h>
62846 +#include <linux/in.h>
62847 +#include <linux/ip.h>
62848 +#include <linux/skbuff.h>
62849 +#include <linux/sysctl.h>
62850 +
62851 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62852 +void
62853 +pax_set_initial_flags(struct linux_binprm *bprm)
62854 +{
62855 + return;
62856 +}
62857 +#endif
62858 +
62859 +#ifdef CONFIG_SYSCTL
62860 +__u32
62861 +gr_handle_sysctl(const struct ctl_table * table, const int op)
62862 +{
62863 + return 0;
62864 +}
62865 +#endif
62866 +
62867 +#ifdef CONFIG_TASKSTATS
62868 +int gr_is_taskstats_denied(int pid)
62869 +{
62870 + return 0;
62871 +}
62872 +#endif
62873 +
62874 +int
62875 +gr_acl_is_enabled(void)
62876 +{
62877 + return 0;
62878 +}
62879 +
62880 +void
62881 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62882 +{
62883 + return;
62884 +}
62885 +
62886 +int
62887 +gr_handle_rawio(const struct inode *inode)
62888 +{
62889 + return 0;
62890 +}
62891 +
62892 +void
62893 +gr_acl_handle_psacct(struct task_struct *task, const long code)
62894 +{
62895 + return;
62896 +}
62897 +
62898 +int
62899 +gr_handle_ptrace(struct task_struct *task, const long request)
62900 +{
62901 + return 0;
62902 +}
62903 +
62904 +int
62905 +gr_handle_proc_ptrace(struct task_struct *task)
62906 +{
62907 + return 0;
62908 +}
62909 +
62910 +void
62911 +gr_learn_resource(const struct task_struct *task,
62912 + const int res, const unsigned long wanted, const int gt)
62913 +{
62914 + return;
62915 +}
62916 +
62917 +int
62918 +gr_set_acls(const int type)
62919 +{
62920 + return 0;
62921 +}
62922 +
62923 +int
62924 +gr_check_hidden_task(const struct task_struct *tsk)
62925 +{
62926 + return 0;
62927 +}
62928 +
62929 +int
62930 +gr_check_protected_task(const struct task_struct *task)
62931 +{
62932 + return 0;
62933 +}
62934 +
62935 +int
62936 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62937 +{
62938 + return 0;
62939 +}
62940 +
62941 +void
62942 +gr_copy_label(struct task_struct *tsk)
62943 +{
62944 + return;
62945 +}
62946 +
62947 +void
62948 +gr_set_pax_flags(struct task_struct *task)
62949 +{
62950 + return;
62951 +}
62952 +
62953 +int
62954 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62955 + const int unsafe_share)
62956 +{
62957 + return 0;
62958 +}
62959 +
62960 +void
62961 +gr_handle_delete(const ino_t ino, const dev_t dev)
62962 +{
62963 + return;
62964 +}
62965 +
62966 +void
62967 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62968 +{
62969 + return;
62970 +}
62971 +
62972 +void
62973 +gr_handle_crash(struct task_struct *task, const int sig)
62974 +{
62975 + return;
62976 +}
62977 +
62978 +int
62979 +gr_check_crash_exec(const struct file *filp)
62980 +{
62981 + return 0;
62982 +}
62983 +
62984 +int
62985 +gr_check_crash_uid(const uid_t uid)
62986 +{
62987 + return 0;
62988 +}
62989 +
62990 +void
62991 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62992 + struct dentry *old_dentry,
62993 + struct dentry *new_dentry,
62994 + struct vfsmount *mnt, const __u8 replace)
62995 +{
62996 + return;
62997 +}
62998 +
62999 +int
63000 +gr_search_socket(const int family, const int type, const int protocol)
63001 +{
63002 + return 1;
63003 +}
63004 +
63005 +int
63006 +gr_search_connectbind(const int mode, const struct socket *sock,
63007 + const struct sockaddr_in *addr)
63008 +{
63009 + return 0;
63010 +}
63011 +
63012 +void
63013 +gr_handle_alertkill(struct task_struct *task)
63014 +{
63015 + return;
63016 +}
63017 +
63018 +__u32
63019 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63020 +{
63021 + return 1;
63022 +}
63023 +
63024 +__u32
63025 +gr_acl_handle_hidden_file(const struct dentry * dentry,
63026 + const struct vfsmount * mnt)
63027 +{
63028 + return 1;
63029 +}
63030 +
63031 +__u32
63032 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63033 + int acc_mode)
63034 +{
63035 + return 1;
63036 +}
63037 +
63038 +__u32
63039 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63040 +{
63041 + return 1;
63042 +}
63043 +
63044 +__u32
63045 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63046 +{
63047 + return 1;
63048 +}
63049 +
63050 +int
63051 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63052 + unsigned int *vm_flags)
63053 +{
63054 + return 1;
63055 +}
63056 +
63057 +__u32
63058 +gr_acl_handle_truncate(const struct dentry * dentry,
63059 + const struct vfsmount * mnt)
63060 +{
63061 + return 1;
63062 +}
63063 +
63064 +__u32
63065 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63066 +{
63067 + return 1;
63068 +}
63069 +
63070 +__u32
63071 +gr_acl_handle_access(const struct dentry * dentry,
63072 + const struct vfsmount * mnt, const int fmode)
63073 +{
63074 + return 1;
63075 +}
63076 +
63077 +__u32
63078 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63079 + umode_t *mode)
63080 +{
63081 + return 1;
63082 +}
63083 +
63084 +__u32
63085 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63086 +{
63087 + return 1;
63088 +}
63089 +
63090 +__u32
63091 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63092 +{
63093 + return 1;
63094 +}
63095 +
63096 +void
63097 +grsecurity_init(void)
63098 +{
63099 + return;
63100 +}
63101 +
63102 +umode_t gr_acl_umask(void)
63103 +{
63104 + return 0;
63105 +}
63106 +
63107 +__u32
63108 +gr_acl_handle_mknod(const struct dentry * new_dentry,
63109 + const struct dentry * parent_dentry,
63110 + const struct vfsmount * parent_mnt,
63111 + const int mode)
63112 +{
63113 + return 1;
63114 +}
63115 +
63116 +__u32
63117 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
63118 + const struct dentry * parent_dentry,
63119 + const struct vfsmount * parent_mnt)
63120 +{
63121 + return 1;
63122 +}
63123 +
63124 +__u32
63125 +gr_acl_handle_symlink(const struct dentry * new_dentry,
63126 + const struct dentry * parent_dentry,
63127 + const struct vfsmount * parent_mnt, const char *from)
63128 +{
63129 + return 1;
63130 +}
63131 +
63132 +__u32
63133 +gr_acl_handle_link(const struct dentry * new_dentry,
63134 + const struct dentry * parent_dentry,
63135 + const struct vfsmount * parent_mnt,
63136 + const struct dentry * old_dentry,
63137 + const struct vfsmount * old_mnt, const char *to)
63138 +{
63139 + return 1;
63140 +}
63141 +
63142 +int
63143 +gr_acl_handle_rename(const struct dentry *new_dentry,
63144 + const struct dentry *parent_dentry,
63145 + const struct vfsmount *parent_mnt,
63146 + const struct dentry *old_dentry,
63147 + const struct inode *old_parent_inode,
63148 + const struct vfsmount *old_mnt, const char *newname)
63149 +{
63150 + return 0;
63151 +}
63152 +
63153 +int
63154 +gr_acl_handle_filldir(const struct file *file, const char *name,
63155 + const int namelen, const ino_t ino)
63156 +{
63157 + return 1;
63158 +}
63159 +
63160 +int
63161 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63162 + const time_t shm_createtime, const uid_t cuid, const int shmid)
63163 +{
63164 + return 1;
63165 +}
63166 +
63167 +int
63168 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63169 +{
63170 + return 0;
63171 +}
63172 +
63173 +int
63174 +gr_search_accept(const struct socket *sock)
63175 +{
63176 + return 0;
63177 +}
63178 +
63179 +int
63180 +gr_search_listen(const struct socket *sock)
63181 +{
63182 + return 0;
63183 +}
63184 +
63185 +int
63186 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63187 +{
63188 + return 0;
63189 +}
63190 +
63191 +__u32
63192 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63193 +{
63194 + return 1;
63195 +}
63196 +
63197 +__u32
63198 +gr_acl_handle_creat(const struct dentry * dentry,
63199 + const struct dentry * p_dentry,
63200 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63201 + const int imode)
63202 +{
63203 + return 1;
63204 +}
63205 +
63206 +void
63207 +gr_acl_handle_exit(void)
63208 +{
63209 + return;
63210 +}
63211 +
63212 +int
63213 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63214 +{
63215 + return 1;
63216 +}
63217 +
63218 +void
63219 +gr_set_role_label(const uid_t uid, const gid_t gid)
63220 +{
63221 + return;
63222 +}
63223 +
63224 +int
63225 +gr_acl_handle_procpidmem(const struct task_struct *task)
63226 +{
63227 + return 0;
63228 +}
63229 +
63230 +int
63231 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63232 +{
63233 + return 0;
63234 +}
63235 +
63236 +int
63237 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63238 +{
63239 + return 0;
63240 +}
63241 +
63242 +void
63243 +gr_set_kernel_label(struct task_struct *task)
63244 +{
63245 + return;
63246 +}
63247 +
63248 +int
63249 +gr_check_user_change(int real, int effective, int fs)
63250 +{
63251 + return 0;
63252 +}
63253 +
63254 +int
63255 +gr_check_group_change(int real, int effective, int fs)
63256 +{
63257 + return 0;
63258 +}
63259 +
63260 +int gr_acl_enable_at_secure(void)
63261 +{
63262 + return 0;
63263 +}
63264 +
63265 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63266 +{
63267 + return dentry->d_inode->i_sb->s_dev;
63268 +}
63269 +
63270 +EXPORT_SYMBOL(gr_learn_resource);
63271 +EXPORT_SYMBOL(gr_set_kernel_label);
63272 +#ifdef CONFIG_SECURITY
63273 +EXPORT_SYMBOL(gr_check_user_change);
63274 +EXPORT_SYMBOL(gr_check_group_change);
63275 +#endif
63276 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63277 new file mode 100644
63278 index 0000000..a96e155
63279 --- /dev/null
63280 +++ b/grsecurity/grsec_exec.c
63281 @@ -0,0 +1,204 @@
63282 +#include <linux/kernel.h>
63283 +#include <linux/sched.h>
63284 +#include <linux/file.h>
63285 +#include <linux/binfmts.h>
63286 +#include <linux/smp_lock.h>
63287 +#include <linux/fs.h>
63288 +#include <linux/types.h>
63289 +#include <linux/grdefs.h>
63290 +#include <linux/grinternal.h>
63291 +#include <linux/capability.h>
63292 +#include <linux/compat.h>
63293 +#include <linux/module.h>
63294 +
63295 +#include <asm/uaccess.h>
63296 +
63297 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63298 +static char gr_exec_arg_buf[132];
63299 +static DEFINE_MUTEX(gr_exec_arg_mutex);
63300 +#endif
63301 +
63302 +void
63303 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63304 +{
63305 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63306 + char *grarg = gr_exec_arg_buf;
63307 + unsigned int i, x, execlen = 0;
63308 + char c;
63309 +
63310 + if (!((grsec_enable_execlog && grsec_enable_group &&
63311 + in_group_p(grsec_audit_gid))
63312 + || (grsec_enable_execlog && !grsec_enable_group)))
63313 + return;
63314 +
63315 + mutex_lock(&gr_exec_arg_mutex);
63316 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63317 +
63318 + if (unlikely(argv == NULL))
63319 + goto log;
63320 +
63321 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63322 + const char __user *p;
63323 + unsigned int len;
63324 +
63325 + if (copy_from_user(&p, argv + i, sizeof(p)))
63326 + goto log;
63327 + if (!p)
63328 + goto log;
63329 + len = strnlen_user(p, 128 - execlen);
63330 + if (len > 128 - execlen)
63331 + len = 128 - execlen;
63332 + else if (len > 0)
63333 + len--;
63334 + if (copy_from_user(grarg + execlen, p, len))
63335 + goto log;
63336 +
63337 + /* rewrite unprintable characters */
63338 + for (x = 0; x < len; x++) {
63339 + c = *(grarg + execlen + x);
63340 + if (c < 32 || c > 126)
63341 + *(grarg + execlen + x) = ' ';
63342 + }
63343 +
63344 + execlen += len;
63345 + *(grarg + execlen) = ' ';
63346 + *(grarg + execlen + 1) = '\0';
63347 + execlen++;
63348 + }
63349 +
63350 + log:
63351 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63352 + bprm->file->f_path.mnt, grarg);
63353 + mutex_unlock(&gr_exec_arg_mutex);
63354 +#endif
63355 + return;
63356 +}
63357 +
63358 +#ifdef CONFIG_COMPAT
63359 +void
63360 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63361 +{
63362 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63363 + char *grarg = gr_exec_arg_buf;
63364 + unsigned int i, x, execlen = 0;
63365 + char c;
63366 +
63367 + if (!((grsec_enable_execlog && grsec_enable_group &&
63368 + in_group_p(grsec_audit_gid))
63369 + || (grsec_enable_execlog && !grsec_enable_group)))
63370 + return;
63371 +
63372 + mutex_lock(&gr_exec_arg_mutex);
63373 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63374 +
63375 + if (unlikely(argv == NULL))
63376 + goto log;
63377 +
63378 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63379 + compat_uptr_t p;
63380 + unsigned int len;
63381 +
63382 + if (get_user(p, argv + i))
63383 + goto log;
63384 + len = strnlen_user(compat_ptr(p), 128 - execlen);
63385 + if (len > 128 - execlen)
63386 + len = 128 - execlen;
63387 + else if (len > 0)
63388 + len--;
63389 + else
63390 + goto log;
63391 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63392 + goto log;
63393 +
63394 + /* rewrite unprintable characters */
63395 + for (x = 0; x < len; x++) {
63396 + c = *(grarg + execlen + x);
63397 + if (c < 32 || c > 126)
63398 + *(grarg + execlen + x) = ' ';
63399 + }
63400 +
63401 + execlen += len;
63402 + *(grarg + execlen) = ' ';
63403 + *(grarg + execlen + 1) = '\0';
63404 + execlen++;
63405 + }
63406 +
63407 + log:
63408 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63409 + bprm->file->f_path.mnt, grarg);
63410 + mutex_unlock(&gr_exec_arg_mutex);
63411 +#endif
63412 + return;
63413 +}
63414 +#endif
63415 +
63416 +#ifdef CONFIG_GRKERNSEC
63417 +extern int gr_acl_is_capable(const int cap);
63418 +extern int gr_acl_is_capable_nolog(const int cap);
63419 +extern int gr_chroot_is_capable(const int cap);
63420 +extern int gr_chroot_is_capable_nolog(const int cap);
63421 +#endif
63422 +
63423 +const char *captab_log[] = {
63424 + "CAP_CHOWN",
63425 + "CAP_DAC_OVERRIDE",
63426 + "CAP_DAC_READ_SEARCH",
63427 + "CAP_FOWNER",
63428 + "CAP_FSETID",
63429 + "CAP_KILL",
63430 + "CAP_SETGID",
63431 + "CAP_SETUID",
63432 + "CAP_SETPCAP",
63433 + "CAP_LINUX_IMMUTABLE",
63434 + "CAP_NET_BIND_SERVICE",
63435 + "CAP_NET_BROADCAST",
63436 + "CAP_NET_ADMIN",
63437 + "CAP_NET_RAW",
63438 + "CAP_IPC_LOCK",
63439 + "CAP_IPC_OWNER",
63440 + "CAP_SYS_MODULE",
63441 + "CAP_SYS_RAWIO",
63442 + "CAP_SYS_CHROOT",
63443 + "CAP_SYS_PTRACE",
63444 + "CAP_SYS_PACCT",
63445 + "CAP_SYS_ADMIN",
63446 + "CAP_SYS_BOOT",
63447 + "CAP_SYS_NICE",
63448 + "CAP_SYS_RESOURCE",
63449 + "CAP_SYS_TIME",
63450 + "CAP_SYS_TTY_CONFIG",
63451 + "CAP_MKNOD",
63452 + "CAP_LEASE",
63453 + "CAP_AUDIT_WRITE",
63454 + "CAP_AUDIT_CONTROL",
63455 + "CAP_SETFCAP",
63456 + "CAP_MAC_OVERRIDE",
63457 + "CAP_MAC_ADMIN"
63458 +};
63459 +
63460 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63461 +
63462 +int gr_is_capable(const int cap)
63463 +{
63464 +#ifdef CONFIG_GRKERNSEC
63465 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63466 + return 1;
63467 + return 0;
63468 +#else
63469 + return 1;
63470 +#endif
63471 +}
63472 +
63473 +int gr_is_capable_nolog(const int cap)
63474 +{
63475 +#ifdef CONFIG_GRKERNSEC
63476 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63477 + return 1;
63478 + return 0;
63479 +#else
63480 + return 1;
63481 +#endif
63482 +}
63483 +
63484 +EXPORT_SYMBOL(gr_is_capable);
63485 +EXPORT_SYMBOL(gr_is_capable_nolog);
63486 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63487 new file mode 100644
63488 index 0000000..d3ee748
63489 --- /dev/null
63490 +++ b/grsecurity/grsec_fifo.c
63491 @@ -0,0 +1,24 @@
63492 +#include <linux/kernel.h>
63493 +#include <linux/sched.h>
63494 +#include <linux/fs.h>
63495 +#include <linux/file.h>
63496 +#include <linux/grinternal.h>
63497 +
63498 +int
63499 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63500 + const struct dentry *dir, const int flag, const int acc_mode)
63501 +{
63502 +#ifdef CONFIG_GRKERNSEC_FIFO
63503 + const struct cred *cred = current_cred();
63504 +
63505 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63506 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63507 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63508 + (cred->fsuid != dentry->d_inode->i_uid)) {
63509 + if (!inode_permission(dentry->d_inode, acc_mode))
63510 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63511 + return -EACCES;
63512 + }
63513 +#endif
63514 + return 0;
63515 +}
63516 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63517 new file mode 100644
63518 index 0000000..8ca18bf
63519 --- /dev/null
63520 +++ b/grsecurity/grsec_fork.c
63521 @@ -0,0 +1,23 @@
63522 +#include <linux/kernel.h>
63523 +#include <linux/sched.h>
63524 +#include <linux/grsecurity.h>
63525 +#include <linux/grinternal.h>
63526 +#include <linux/errno.h>
63527 +
63528 +void
63529 +gr_log_forkfail(const int retval)
63530 +{
63531 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63532 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63533 + switch (retval) {
63534 + case -EAGAIN:
63535 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63536 + break;
63537 + case -ENOMEM:
63538 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63539 + break;
63540 + }
63541 + }
63542 +#endif
63543 + return;
63544 +}
63545 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63546 new file mode 100644
63547 index 0000000..1e995d3
63548 --- /dev/null
63549 +++ b/grsecurity/grsec_init.c
63550 @@ -0,0 +1,278 @@
63551 +#include <linux/kernel.h>
63552 +#include <linux/sched.h>
63553 +#include <linux/mm.h>
63554 +#include <linux/smp_lock.h>
63555 +#include <linux/gracl.h>
63556 +#include <linux/slab.h>
63557 +#include <linux/vmalloc.h>
63558 +#include <linux/percpu.h>
63559 +#include <linux/module.h>
63560 +
63561 +int grsec_enable_ptrace_readexec;
63562 +int grsec_enable_setxid;
63563 +int grsec_enable_brute;
63564 +int grsec_enable_link;
63565 +int grsec_enable_dmesg;
63566 +int grsec_enable_harden_ptrace;
63567 +int grsec_enable_fifo;
63568 +int grsec_enable_execlog;
63569 +int grsec_enable_signal;
63570 +int grsec_enable_forkfail;
63571 +int grsec_enable_audit_ptrace;
63572 +int grsec_enable_time;
63573 +int grsec_enable_audit_textrel;
63574 +int grsec_enable_group;
63575 +int grsec_audit_gid;
63576 +int grsec_enable_chdir;
63577 +int grsec_enable_mount;
63578 +int grsec_enable_rofs;
63579 +int grsec_enable_chroot_findtask;
63580 +int grsec_enable_chroot_mount;
63581 +int grsec_enable_chroot_shmat;
63582 +int grsec_enable_chroot_fchdir;
63583 +int grsec_enable_chroot_double;
63584 +int grsec_enable_chroot_pivot;
63585 +int grsec_enable_chroot_chdir;
63586 +int grsec_enable_chroot_chmod;
63587 +int grsec_enable_chroot_mknod;
63588 +int grsec_enable_chroot_nice;
63589 +int grsec_enable_chroot_execlog;
63590 +int grsec_enable_chroot_caps;
63591 +int grsec_enable_chroot_sysctl;
63592 +int grsec_enable_chroot_unix;
63593 +int grsec_enable_tpe;
63594 +int grsec_tpe_gid;
63595 +int grsec_enable_blackhole;
63596 +#ifdef CONFIG_IPV6_MODULE
63597 +EXPORT_SYMBOL(grsec_enable_blackhole);
63598 +#endif
63599 +int grsec_lastack_retries;
63600 +int grsec_enable_tpe_all;
63601 +int grsec_enable_tpe_invert;
63602 +int grsec_enable_socket_all;
63603 +int grsec_socket_all_gid;
63604 +int grsec_enable_socket_client;
63605 +int grsec_socket_client_gid;
63606 +int grsec_enable_socket_server;
63607 +int grsec_socket_server_gid;
63608 +int grsec_resource_logging;
63609 +int grsec_disable_privio;
63610 +int grsec_enable_log_rwxmaps;
63611 +int grsec_lock;
63612 +
63613 +DEFINE_SPINLOCK(grsec_alert_lock);
63614 +unsigned long grsec_alert_wtime = 0;
63615 +unsigned long grsec_alert_fyet = 0;
63616 +
63617 +DEFINE_SPINLOCK(grsec_audit_lock);
63618 +
63619 +DEFINE_RWLOCK(grsec_exec_file_lock);
63620 +
63621 +char *gr_shared_page[4];
63622 +
63623 +char *gr_alert_log_fmt;
63624 +char *gr_audit_log_fmt;
63625 +char *gr_alert_log_buf;
63626 +char *gr_audit_log_buf;
63627 +
63628 +extern struct gr_arg *gr_usermode;
63629 +extern unsigned char *gr_system_salt;
63630 +extern unsigned char *gr_system_sum;
63631 +
63632 +void __init
63633 +grsecurity_init(void)
63634 +{
63635 + int j;
63636 + /* create the per-cpu shared pages */
63637 +
63638 +#ifdef CONFIG_X86
63639 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63640 +#endif
63641 +
63642 + for (j = 0; j < 4; j++) {
63643 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63644 + if (gr_shared_page[j] == NULL) {
63645 + panic("Unable to allocate grsecurity shared page");
63646 + return;
63647 + }
63648 + }
63649 +
63650 + /* allocate log buffers */
63651 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63652 + if (!gr_alert_log_fmt) {
63653 + panic("Unable to allocate grsecurity alert log format buffer");
63654 + return;
63655 + }
63656 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63657 + if (!gr_audit_log_fmt) {
63658 + panic("Unable to allocate grsecurity audit log format buffer");
63659 + return;
63660 + }
63661 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63662 + if (!gr_alert_log_buf) {
63663 + panic("Unable to allocate grsecurity alert log buffer");
63664 + return;
63665 + }
63666 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63667 + if (!gr_audit_log_buf) {
63668 + panic("Unable to allocate grsecurity audit log buffer");
63669 + return;
63670 + }
63671 +
63672 + /* allocate memory for authentication structure */
63673 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63674 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63675 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63676 +
63677 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63678 + panic("Unable to allocate grsecurity authentication structure");
63679 + return;
63680 + }
63681 +
63682 +
63683 +#ifdef CONFIG_GRKERNSEC_IO
63684 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63685 + grsec_disable_privio = 1;
63686 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63687 + grsec_disable_privio = 1;
63688 +#else
63689 + grsec_disable_privio = 0;
63690 +#endif
63691 +#endif
63692 +
63693 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63694 + /* for backward compatibility, tpe_invert always defaults to on if
63695 + enabled in the kernel
63696 + */
63697 + grsec_enable_tpe_invert = 1;
63698 +#endif
63699 +
63700 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63701 +#ifndef CONFIG_GRKERNSEC_SYSCTL
63702 + grsec_lock = 1;
63703 +#endif
63704 +
63705 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63706 + grsec_enable_audit_textrel = 1;
63707 +#endif
63708 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63709 + grsec_enable_log_rwxmaps = 1;
63710 +#endif
63711 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63712 + grsec_enable_group = 1;
63713 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63714 +#endif
63715 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63716 + grsec_enable_chdir = 1;
63717 +#endif
63718 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63719 + grsec_enable_harden_ptrace = 1;
63720 +#endif
63721 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63722 + grsec_enable_mount = 1;
63723 +#endif
63724 +#ifdef CONFIG_GRKERNSEC_LINK
63725 + grsec_enable_link = 1;
63726 +#endif
63727 +#ifdef CONFIG_GRKERNSEC_BRUTE
63728 + grsec_enable_brute = 1;
63729 +#endif
63730 +#ifdef CONFIG_GRKERNSEC_DMESG
63731 + grsec_enable_dmesg = 1;
63732 +#endif
63733 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63734 + grsec_enable_blackhole = 1;
63735 + grsec_lastack_retries = 4;
63736 +#endif
63737 +#ifdef CONFIG_GRKERNSEC_FIFO
63738 + grsec_enable_fifo = 1;
63739 +#endif
63740 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63741 + grsec_enable_execlog = 1;
63742 +#endif
63743 +#ifdef CONFIG_GRKERNSEC_SETXID
63744 + grsec_enable_setxid = 1;
63745 +#endif
63746 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63747 + grsec_enable_ptrace_readexec = 1;
63748 +#endif
63749 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63750 + grsec_enable_signal = 1;
63751 +#endif
63752 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63753 + grsec_enable_forkfail = 1;
63754 +#endif
63755 +#ifdef CONFIG_GRKERNSEC_TIME
63756 + grsec_enable_time = 1;
63757 +#endif
63758 +#ifdef CONFIG_GRKERNSEC_RESLOG
63759 + grsec_resource_logging = 1;
63760 +#endif
63761 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63762 + grsec_enable_chroot_findtask = 1;
63763 +#endif
63764 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63765 + grsec_enable_chroot_unix = 1;
63766 +#endif
63767 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63768 + grsec_enable_chroot_mount = 1;
63769 +#endif
63770 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63771 + grsec_enable_chroot_fchdir = 1;
63772 +#endif
63773 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63774 + grsec_enable_chroot_shmat = 1;
63775 +#endif
63776 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63777 + grsec_enable_audit_ptrace = 1;
63778 +#endif
63779 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63780 + grsec_enable_chroot_double = 1;
63781 +#endif
63782 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63783 + grsec_enable_chroot_pivot = 1;
63784 +#endif
63785 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63786 + grsec_enable_chroot_chdir = 1;
63787 +#endif
63788 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63789 + grsec_enable_chroot_chmod = 1;
63790 +#endif
63791 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63792 + grsec_enable_chroot_mknod = 1;
63793 +#endif
63794 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63795 + grsec_enable_chroot_nice = 1;
63796 +#endif
63797 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63798 + grsec_enable_chroot_execlog = 1;
63799 +#endif
63800 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63801 + grsec_enable_chroot_caps = 1;
63802 +#endif
63803 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63804 + grsec_enable_chroot_sysctl = 1;
63805 +#endif
63806 +#ifdef CONFIG_GRKERNSEC_TPE
63807 + grsec_enable_tpe = 1;
63808 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63809 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63810 + grsec_enable_tpe_all = 1;
63811 +#endif
63812 +#endif
63813 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63814 + grsec_enable_socket_all = 1;
63815 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63816 +#endif
63817 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63818 + grsec_enable_socket_client = 1;
63819 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63820 +#endif
63821 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63822 + grsec_enable_socket_server = 1;
63823 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63824 +#endif
63825 +#endif
63826 +
63827 + return;
63828 +}
63829 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63830 new file mode 100644
63831 index 0000000..3efe141
63832 --- /dev/null
63833 +++ b/grsecurity/grsec_link.c
63834 @@ -0,0 +1,43 @@
63835 +#include <linux/kernel.h>
63836 +#include <linux/sched.h>
63837 +#include <linux/fs.h>
63838 +#include <linux/file.h>
63839 +#include <linux/grinternal.h>
63840 +
63841 +int
63842 +gr_handle_follow_link(const struct inode *parent,
63843 + const struct inode *inode,
63844 + const struct dentry *dentry, const struct vfsmount *mnt)
63845 +{
63846 +#ifdef CONFIG_GRKERNSEC_LINK
63847 + const struct cred *cred = current_cred();
63848 +
63849 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63850 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63851 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63852 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63853 + return -EACCES;
63854 + }
63855 +#endif
63856 + return 0;
63857 +}
63858 +
63859 +int
63860 +gr_handle_hardlink(const struct dentry *dentry,
63861 + const struct vfsmount *mnt,
63862 + struct inode *inode, const int mode, const char *to)
63863 +{
63864 +#ifdef CONFIG_GRKERNSEC_LINK
63865 + const struct cred *cred = current_cred();
63866 +
63867 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63868 + (!S_ISREG(mode) || (mode & S_ISUID) ||
63869 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63870 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63871 + !capable(CAP_FOWNER) && cred->uid) {
63872 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63873 + return -EPERM;
63874 + }
63875 +#endif
63876 + return 0;
63877 +}
63878 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63879 new file mode 100644
63880 index 0000000..a45d2e9
63881 --- /dev/null
63882 +++ b/grsecurity/grsec_log.c
63883 @@ -0,0 +1,322 @@
63884 +#include <linux/kernel.h>
63885 +#include <linux/sched.h>
63886 +#include <linux/file.h>
63887 +#include <linux/tty.h>
63888 +#include <linux/fs.h>
63889 +#include <linux/grinternal.h>
63890 +
63891 +#ifdef CONFIG_TREE_PREEMPT_RCU
63892 +#define DISABLE_PREEMPT() preempt_disable()
63893 +#define ENABLE_PREEMPT() preempt_enable()
63894 +#else
63895 +#define DISABLE_PREEMPT()
63896 +#define ENABLE_PREEMPT()
63897 +#endif
63898 +
63899 +#define BEGIN_LOCKS(x) \
63900 + DISABLE_PREEMPT(); \
63901 + rcu_read_lock(); \
63902 + read_lock(&tasklist_lock); \
63903 + read_lock(&grsec_exec_file_lock); \
63904 + if (x != GR_DO_AUDIT) \
63905 + spin_lock(&grsec_alert_lock); \
63906 + else \
63907 + spin_lock(&grsec_audit_lock)
63908 +
63909 +#define END_LOCKS(x) \
63910 + if (x != GR_DO_AUDIT) \
63911 + spin_unlock(&grsec_alert_lock); \
63912 + else \
63913 + spin_unlock(&grsec_audit_lock); \
63914 + read_unlock(&grsec_exec_file_lock); \
63915 + read_unlock(&tasklist_lock); \
63916 + rcu_read_unlock(); \
63917 + ENABLE_PREEMPT(); \
63918 + if (x == GR_DONT_AUDIT) \
63919 + gr_handle_alertkill(current)
63920 +
63921 +enum {
63922 + FLOODING,
63923 + NO_FLOODING
63924 +};
63925 +
63926 +extern char *gr_alert_log_fmt;
63927 +extern char *gr_audit_log_fmt;
63928 +extern char *gr_alert_log_buf;
63929 +extern char *gr_audit_log_buf;
63930 +
63931 +static int gr_log_start(int audit)
63932 +{
63933 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63934 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63935 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63936 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63937 + unsigned long curr_secs = get_seconds();
63938 +
63939 + if (audit == GR_DO_AUDIT)
63940 + goto set_fmt;
63941 +
63942 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63943 + grsec_alert_wtime = curr_secs;
63944 + grsec_alert_fyet = 0;
63945 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63946 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63947 + grsec_alert_fyet++;
63948 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63949 + grsec_alert_wtime = curr_secs;
63950 + grsec_alert_fyet++;
63951 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63952 + return FLOODING;
63953 + }
63954 + else return FLOODING;
63955 +
63956 +set_fmt:
63957 +#endif
63958 + memset(buf, 0, PAGE_SIZE);
63959 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
63960 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63961 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63962 + } else if (current->signal->curr_ip) {
63963 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63964 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63965 + } else if (gr_acl_is_enabled()) {
63966 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63967 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63968 + } else {
63969 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
63970 + strcpy(buf, fmt);
63971 + }
63972 +
63973 + return NO_FLOODING;
63974 +}
63975 +
63976 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63977 + __attribute__ ((format (printf, 2, 0)));
63978 +
63979 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63980 +{
63981 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63982 + unsigned int len = strlen(buf);
63983 +
63984 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63985 +
63986 + return;
63987 +}
63988 +
63989 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63990 + __attribute__ ((format (printf, 2, 3)));
63991 +
63992 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63993 +{
63994 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63995 + unsigned int len = strlen(buf);
63996 + va_list ap;
63997 +
63998 + va_start(ap, msg);
63999 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64000 + va_end(ap);
64001 +
64002 + return;
64003 +}
64004 +
64005 +static void gr_log_end(int audit, int append_default)
64006 +{
64007 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64008 +
64009 + if (append_default) {
64010 + unsigned int len = strlen(buf);
64011 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
64012 + }
64013 +
64014 + printk("%s\n", buf);
64015 +
64016 + return;
64017 +}
64018 +
64019 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64020 +{
64021 + int logtype;
64022 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64023 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64024 + void *voidptr = NULL;
64025 + int num1 = 0, num2 = 0;
64026 + unsigned long ulong1 = 0, ulong2 = 0;
64027 + struct dentry *dentry = NULL;
64028 + struct vfsmount *mnt = NULL;
64029 + struct file *file = NULL;
64030 + struct task_struct *task = NULL;
64031 + const struct cred *cred, *pcred;
64032 + va_list ap;
64033 +
64034 + BEGIN_LOCKS(audit);
64035 + logtype = gr_log_start(audit);
64036 + if (logtype == FLOODING) {
64037 + END_LOCKS(audit);
64038 + return;
64039 + }
64040 + va_start(ap, argtypes);
64041 + switch (argtypes) {
64042 + case GR_TTYSNIFF:
64043 + task = va_arg(ap, struct task_struct *);
64044 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
64045 + break;
64046 + case GR_SYSCTL_HIDDEN:
64047 + str1 = va_arg(ap, char *);
64048 + gr_log_middle_varargs(audit, msg, result, str1);
64049 + break;
64050 + case GR_RBAC:
64051 + dentry = va_arg(ap, struct dentry *);
64052 + mnt = va_arg(ap, struct vfsmount *);
64053 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64054 + break;
64055 + case GR_RBAC_STR:
64056 + dentry = va_arg(ap, struct dentry *);
64057 + mnt = va_arg(ap, struct vfsmount *);
64058 + str1 = va_arg(ap, char *);
64059 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64060 + break;
64061 + case GR_STR_RBAC:
64062 + str1 = va_arg(ap, char *);
64063 + dentry = va_arg(ap, struct dentry *);
64064 + mnt = va_arg(ap, struct vfsmount *);
64065 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64066 + break;
64067 + case GR_RBAC_MODE2:
64068 + dentry = va_arg(ap, struct dentry *);
64069 + mnt = va_arg(ap, struct vfsmount *);
64070 + str1 = va_arg(ap, char *);
64071 + str2 = va_arg(ap, char *);
64072 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64073 + break;
64074 + case GR_RBAC_MODE3:
64075 + dentry = va_arg(ap, struct dentry *);
64076 + mnt = va_arg(ap, struct vfsmount *);
64077 + str1 = va_arg(ap, char *);
64078 + str2 = va_arg(ap, char *);
64079 + str3 = va_arg(ap, char *);
64080 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64081 + break;
64082 + case GR_FILENAME:
64083 + dentry = va_arg(ap, struct dentry *);
64084 + mnt = va_arg(ap, struct vfsmount *);
64085 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64086 + break;
64087 + case GR_STR_FILENAME:
64088 + str1 = va_arg(ap, char *);
64089 + dentry = va_arg(ap, struct dentry *);
64090 + mnt = va_arg(ap, struct vfsmount *);
64091 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64092 + break;
64093 + case GR_FILENAME_STR:
64094 + dentry = va_arg(ap, struct dentry *);
64095 + mnt = va_arg(ap, struct vfsmount *);
64096 + str1 = va_arg(ap, char *);
64097 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64098 + break;
64099 + case GR_FILENAME_TWO_INT:
64100 + dentry = va_arg(ap, struct dentry *);
64101 + mnt = va_arg(ap, struct vfsmount *);
64102 + num1 = va_arg(ap, int);
64103 + num2 = va_arg(ap, int);
64104 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64105 + break;
64106 + case GR_FILENAME_TWO_INT_STR:
64107 + dentry = va_arg(ap, struct dentry *);
64108 + mnt = va_arg(ap, struct vfsmount *);
64109 + num1 = va_arg(ap, int);
64110 + num2 = va_arg(ap, int);
64111 + str1 = va_arg(ap, char *);
64112 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64113 + break;
64114 + case GR_TEXTREL:
64115 + file = va_arg(ap, struct file *);
64116 + ulong1 = va_arg(ap, unsigned long);
64117 + ulong2 = va_arg(ap, unsigned long);
64118 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64119 + break;
64120 + case GR_PTRACE:
64121 + task = va_arg(ap, struct task_struct *);
64122 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
64123 + break;
64124 + case GR_RESOURCE:
64125 + task = va_arg(ap, struct task_struct *);
64126 + cred = __task_cred(task);
64127 + pcred = __task_cred(task->real_parent);
64128 + ulong1 = va_arg(ap, unsigned long);
64129 + str1 = va_arg(ap, char *);
64130 + ulong2 = va_arg(ap, unsigned long);
64131 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64132 + break;
64133 + case GR_CAP:
64134 + task = va_arg(ap, struct task_struct *);
64135 + cred = __task_cred(task);
64136 + pcred = __task_cred(task->real_parent);
64137 + str1 = va_arg(ap, char *);
64138 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64139 + break;
64140 + case GR_SIG:
64141 + str1 = va_arg(ap, char *);
64142 + voidptr = va_arg(ap, void *);
64143 + gr_log_middle_varargs(audit, msg, str1, voidptr);
64144 + break;
64145 + case GR_SIG2:
64146 + task = va_arg(ap, struct task_struct *);
64147 + cred = __task_cred(task);
64148 + pcred = __task_cred(task->real_parent);
64149 + num1 = va_arg(ap, int);
64150 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64151 + break;
64152 + case GR_CRASH1:
64153 + task = va_arg(ap, struct task_struct *);
64154 + cred = __task_cred(task);
64155 + pcred = __task_cred(task->real_parent);
64156 + ulong1 = va_arg(ap, unsigned long);
64157 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
64158 + break;
64159 + case GR_CRASH2:
64160 + task = va_arg(ap, struct task_struct *);
64161 + cred = __task_cred(task);
64162 + pcred = __task_cred(task->real_parent);
64163 + ulong1 = va_arg(ap, unsigned long);
64164 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
64165 + break;
64166 + case GR_RWXMAP:
64167 + file = va_arg(ap, struct file *);
64168 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64169 + break;
64170 + case GR_PSACCT:
64171 + {
64172 + unsigned int wday, cday;
64173 + __u8 whr, chr;
64174 + __u8 wmin, cmin;
64175 + __u8 wsec, csec;
64176 + char cur_tty[64] = { 0 };
64177 + char parent_tty[64] = { 0 };
64178 +
64179 + task = va_arg(ap, struct task_struct *);
64180 + wday = va_arg(ap, unsigned int);
64181 + cday = va_arg(ap, unsigned int);
64182 + whr = va_arg(ap, int);
64183 + chr = va_arg(ap, int);
64184 + wmin = va_arg(ap, int);
64185 + cmin = va_arg(ap, int);
64186 + wsec = va_arg(ap, int);
64187 + csec = va_arg(ap, int);
64188 + ulong1 = va_arg(ap, unsigned long);
64189 + cred = __task_cred(task);
64190 + pcred = __task_cred(task->real_parent);
64191 +
64192 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64193 + }
64194 + break;
64195 + default:
64196 + gr_log_middle(audit, msg, ap);
64197 + }
64198 + va_end(ap);
64199 + // these don't need DEFAULTSECARGS printed on the end
64200 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64201 + gr_log_end(audit, 0);
64202 + else
64203 + gr_log_end(audit, 1);
64204 + END_LOCKS(audit);
64205 +}
64206 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64207 new file mode 100644
64208 index 0000000..f536303
64209 --- /dev/null
64210 +++ b/grsecurity/grsec_mem.c
64211 @@ -0,0 +1,40 @@
64212 +#include <linux/kernel.h>
64213 +#include <linux/sched.h>
64214 +#include <linux/mm.h>
64215 +#include <linux/mman.h>
64216 +#include <linux/grinternal.h>
64217 +
64218 +void
64219 +gr_handle_ioperm(void)
64220 +{
64221 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64222 + return;
64223 +}
64224 +
64225 +void
64226 +gr_handle_iopl(void)
64227 +{
64228 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64229 + return;
64230 +}
64231 +
64232 +void
64233 +gr_handle_mem_readwrite(u64 from, u64 to)
64234 +{
64235 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64236 + return;
64237 +}
64238 +
64239 +void
64240 +gr_handle_vm86(void)
64241 +{
64242 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64243 + return;
64244 +}
64245 +
64246 +void
64247 +gr_log_badprocpid(const char *entry)
64248 +{
64249 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64250 + return;
64251 +}
64252 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64253 new file mode 100644
64254 index 0000000..2131422
64255 --- /dev/null
64256 +++ b/grsecurity/grsec_mount.c
64257 @@ -0,0 +1,62 @@
64258 +#include <linux/kernel.h>
64259 +#include <linux/sched.h>
64260 +#include <linux/mount.h>
64261 +#include <linux/grsecurity.h>
64262 +#include <linux/grinternal.h>
64263 +
64264 +void
64265 +gr_log_remount(const char *devname, const int retval)
64266 +{
64267 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64268 + if (grsec_enable_mount && (retval >= 0))
64269 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64270 +#endif
64271 + return;
64272 +}
64273 +
64274 +void
64275 +gr_log_unmount(const char *devname, const int retval)
64276 +{
64277 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64278 + if (grsec_enable_mount && (retval >= 0))
64279 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64280 +#endif
64281 + return;
64282 +}
64283 +
64284 +void
64285 +gr_log_mount(const char *from, const char *to, const int retval)
64286 +{
64287 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64288 + if (grsec_enable_mount && (retval >= 0))
64289 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64290 +#endif
64291 + return;
64292 +}
64293 +
64294 +int
64295 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64296 +{
64297 +#ifdef CONFIG_GRKERNSEC_ROFS
64298 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64299 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64300 + return -EPERM;
64301 + } else
64302 + return 0;
64303 +#endif
64304 + return 0;
64305 +}
64306 +
64307 +int
64308 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64309 +{
64310 +#ifdef CONFIG_GRKERNSEC_ROFS
64311 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64312 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64313 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64314 + return -EPERM;
64315 + } else
64316 + return 0;
64317 +#endif
64318 + return 0;
64319 +}
64320 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64321 new file mode 100644
64322 index 0000000..a3b12a0
64323 --- /dev/null
64324 +++ b/grsecurity/grsec_pax.c
64325 @@ -0,0 +1,36 @@
64326 +#include <linux/kernel.h>
64327 +#include <linux/sched.h>
64328 +#include <linux/mm.h>
64329 +#include <linux/file.h>
64330 +#include <linux/grinternal.h>
64331 +#include <linux/grsecurity.h>
64332 +
64333 +void
64334 +gr_log_textrel(struct vm_area_struct * vma)
64335 +{
64336 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64337 + if (grsec_enable_audit_textrel)
64338 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64339 +#endif
64340 + return;
64341 +}
64342 +
64343 +void
64344 +gr_log_rwxmmap(struct file *file)
64345 +{
64346 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64347 + if (grsec_enable_log_rwxmaps)
64348 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64349 +#endif
64350 + return;
64351 +}
64352 +
64353 +void
64354 +gr_log_rwxmprotect(struct file *file)
64355 +{
64356 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64357 + if (grsec_enable_log_rwxmaps)
64358 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64359 +#endif
64360 + return;
64361 +}
64362 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64363 new file mode 100644
64364 index 0000000..78f8733
64365 --- /dev/null
64366 +++ b/grsecurity/grsec_ptrace.c
64367 @@ -0,0 +1,30 @@
64368 +#include <linux/kernel.h>
64369 +#include <linux/sched.h>
64370 +#include <linux/grinternal.h>
64371 +#include <linux/security.h>
64372 +
64373 +void
64374 +gr_audit_ptrace(struct task_struct *task)
64375 +{
64376 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64377 + if (grsec_enable_audit_ptrace)
64378 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64379 +#endif
64380 + return;
64381 +}
64382 +
64383 +int
64384 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
64385 +{
64386 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64387 + const struct dentry *dentry = file->f_path.dentry;
64388 + const struct vfsmount *mnt = file->f_path.mnt;
64389 +
64390 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64391 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64392 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64393 + return -EACCES;
64394 + }
64395 +#endif
64396 + return 0;
64397 +}
64398 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64399 new file mode 100644
64400 index 0000000..c648492
64401 --- /dev/null
64402 +++ b/grsecurity/grsec_sig.c
64403 @@ -0,0 +1,206 @@
64404 +#include <linux/kernel.h>
64405 +#include <linux/sched.h>
64406 +#include <linux/delay.h>
64407 +#include <linux/grsecurity.h>
64408 +#include <linux/grinternal.h>
64409 +#include <linux/hardirq.h>
64410 +
64411 +char *signames[] = {
64412 + [SIGSEGV] = "Segmentation fault",
64413 + [SIGILL] = "Illegal instruction",
64414 + [SIGABRT] = "Abort",
64415 + [SIGBUS] = "Invalid alignment/Bus error"
64416 +};
64417 +
64418 +void
64419 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64420 +{
64421 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64422 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64423 + (sig == SIGABRT) || (sig == SIGBUS))) {
64424 + if (t->pid == current->pid) {
64425 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64426 + } else {
64427 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64428 + }
64429 + }
64430 +#endif
64431 + return;
64432 +}
64433 +
64434 +int
64435 +gr_handle_signal(const struct task_struct *p, const int sig)
64436 +{
64437 +#ifdef CONFIG_GRKERNSEC
64438 + /* ignore the 0 signal for protected task checks */
64439 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64440 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64441 + return -EPERM;
64442 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64443 + return -EPERM;
64444 + }
64445 +#endif
64446 + return 0;
64447 +}
64448 +
64449 +#ifdef CONFIG_GRKERNSEC
64450 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64451 +
64452 +int gr_fake_force_sig(int sig, struct task_struct *t)
64453 +{
64454 + unsigned long int flags;
64455 + int ret, blocked, ignored;
64456 + struct k_sigaction *action;
64457 +
64458 + spin_lock_irqsave(&t->sighand->siglock, flags);
64459 + action = &t->sighand->action[sig-1];
64460 + ignored = action->sa.sa_handler == SIG_IGN;
64461 + blocked = sigismember(&t->blocked, sig);
64462 + if (blocked || ignored) {
64463 + action->sa.sa_handler = SIG_DFL;
64464 + if (blocked) {
64465 + sigdelset(&t->blocked, sig);
64466 + recalc_sigpending_and_wake(t);
64467 + }
64468 + }
64469 + if (action->sa.sa_handler == SIG_DFL)
64470 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
64471 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64472 +
64473 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
64474 +
64475 + return ret;
64476 +}
64477 +#endif
64478 +
64479 +#ifdef CONFIG_GRKERNSEC_BRUTE
64480 +#define GR_USER_BAN_TIME (15 * 60)
64481 +
64482 +static int __get_dumpable(unsigned long mm_flags)
64483 +{
64484 + int ret;
64485 +
64486 + ret = mm_flags & MMF_DUMPABLE_MASK;
64487 + return (ret >= 2) ? 2 : ret;
64488 +}
64489 +#endif
64490 +
64491 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64492 +{
64493 +#ifdef CONFIG_GRKERNSEC_BRUTE
64494 + uid_t uid = 0;
64495 +
64496 + if (!grsec_enable_brute)
64497 + return;
64498 +
64499 + rcu_read_lock();
64500 + read_lock(&tasklist_lock);
64501 + read_lock(&grsec_exec_file_lock);
64502 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64503 + p->real_parent->brute = 1;
64504 + else {
64505 + const struct cred *cred = __task_cred(p), *cred2;
64506 + struct task_struct *tsk, *tsk2;
64507 +
64508 + if (!__get_dumpable(mm_flags) && cred->uid) {
64509 + struct user_struct *user;
64510 +
64511 + uid = cred->uid;
64512 +
64513 + /* this is put upon execution past expiration */
64514 + user = find_user(uid);
64515 + if (user == NULL)
64516 + goto unlock;
64517 + user->banned = 1;
64518 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64519 + if (user->ban_expires == ~0UL)
64520 + user->ban_expires--;
64521 +
64522 + do_each_thread(tsk2, tsk) {
64523 + cred2 = __task_cred(tsk);
64524 + if (tsk != p && cred2->uid == uid)
64525 + gr_fake_force_sig(SIGKILL, tsk);
64526 + } while_each_thread(tsk2, tsk);
64527 + }
64528 + }
64529 +unlock:
64530 + read_unlock(&grsec_exec_file_lock);
64531 + read_unlock(&tasklist_lock);
64532 + rcu_read_unlock();
64533 +
64534 + if (uid)
64535 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64536 +#endif
64537 + return;
64538 +}
64539 +
64540 +void gr_handle_brute_check(void)
64541 +{
64542 +#ifdef CONFIG_GRKERNSEC_BRUTE
64543 + if (current->brute)
64544 + msleep(30 * 1000);
64545 +#endif
64546 + return;
64547 +}
64548 +
64549 +void gr_handle_kernel_exploit(void)
64550 +{
64551 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64552 + const struct cred *cred;
64553 + struct task_struct *tsk, *tsk2;
64554 + struct user_struct *user;
64555 + uid_t uid;
64556 +
64557 + if (in_irq() || in_serving_softirq() || in_nmi())
64558 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64559 +
64560 + uid = current_uid();
64561 +
64562 + if (uid == 0)
64563 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
64564 + else {
64565 + /* kill all the processes of this user, hold a reference
64566 + to their creds struct, and prevent them from creating
64567 + another process until system reset
64568 + */
64569 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64570 + /* we intentionally leak this ref */
64571 + user = get_uid(current->cred->user);
64572 + if (user) {
64573 + user->banned = 1;
64574 + user->ban_expires = ~0UL;
64575 + }
64576 +
64577 + read_lock(&tasklist_lock);
64578 + do_each_thread(tsk2, tsk) {
64579 + cred = __task_cred(tsk);
64580 + if (cred->uid == uid)
64581 + gr_fake_force_sig(SIGKILL, tsk);
64582 + } while_each_thread(tsk2, tsk);
64583 + read_unlock(&tasklist_lock);
64584 + }
64585 +#endif
64586 +}
64587 +
64588 +int __gr_process_user_ban(struct user_struct *user)
64589 +{
64590 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64591 + if (unlikely(user->banned)) {
64592 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64593 + user->banned = 0;
64594 + user->ban_expires = 0;
64595 + free_uid(user);
64596 + } else
64597 + return -EPERM;
64598 + }
64599 +#endif
64600 + return 0;
64601 +}
64602 +
64603 +int gr_process_user_ban(void)
64604 +{
64605 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64606 + return __gr_process_user_ban(current->cred->user);
64607 +#endif
64608 + return 0;
64609 +}
64610 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64611 new file mode 100644
64612 index 0000000..7512ea9
64613 --- /dev/null
64614 +++ b/grsecurity/grsec_sock.c
64615 @@ -0,0 +1,275 @@
64616 +#include <linux/kernel.h>
64617 +#include <linux/module.h>
64618 +#include <linux/sched.h>
64619 +#include <linux/file.h>
64620 +#include <linux/net.h>
64621 +#include <linux/in.h>
64622 +#include <linux/ip.h>
64623 +#include <net/sock.h>
64624 +#include <net/inet_sock.h>
64625 +#include <linux/grsecurity.h>
64626 +#include <linux/grinternal.h>
64627 +#include <linux/gracl.h>
64628 +
64629 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64630 +EXPORT_SYMBOL(gr_cap_rtnetlink);
64631 +
64632 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64633 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64634 +
64635 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
64636 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
64637 +
64638 +#ifdef CONFIG_UNIX_MODULE
64639 +EXPORT_SYMBOL(gr_acl_handle_unix);
64640 +EXPORT_SYMBOL(gr_acl_handle_mknod);
64641 +EXPORT_SYMBOL(gr_handle_chroot_unix);
64642 +EXPORT_SYMBOL(gr_handle_create);
64643 +#endif
64644 +
64645 +#ifdef CONFIG_GRKERNSEC
64646 +#define gr_conn_table_size 32749
64647 +struct conn_table_entry {
64648 + struct conn_table_entry *next;
64649 + struct signal_struct *sig;
64650 +};
64651 +
64652 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64653 +DEFINE_SPINLOCK(gr_conn_table_lock);
64654 +
64655 +extern const char * gr_socktype_to_name(unsigned char type);
64656 +extern const char * gr_proto_to_name(unsigned char proto);
64657 +extern const char * gr_sockfamily_to_name(unsigned char family);
64658 +
64659 +static __inline__ int
64660 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64661 +{
64662 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64663 +}
64664 +
64665 +static __inline__ int
64666 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64667 + __u16 sport, __u16 dport)
64668 +{
64669 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64670 + sig->gr_sport == sport && sig->gr_dport == dport))
64671 + return 1;
64672 + else
64673 + return 0;
64674 +}
64675 +
64676 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64677 +{
64678 + struct conn_table_entry **match;
64679 + unsigned int index;
64680 +
64681 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64682 + sig->gr_sport, sig->gr_dport,
64683 + gr_conn_table_size);
64684 +
64685 + newent->sig = sig;
64686 +
64687 + match = &gr_conn_table[index];
64688 + newent->next = *match;
64689 + *match = newent;
64690 +
64691 + return;
64692 +}
64693 +
64694 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64695 +{
64696 + struct conn_table_entry *match, *last = NULL;
64697 + unsigned int index;
64698 +
64699 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64700 + sig->gr_sport, sig->gr_dport,
64701 + gr_conn_table_size);
64702 +
64703 + match = gr_conn_table[index];
64704 + while (match && !conn_match(match->sig,
64705 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64706 + sig->gr_dport)) {
64707 + last = match;
64708 + match = match->next;
64709 + }
64710 +
64711 + if (match) {
64712 + if (last)
64713 + last->next = match->next;
64714 + else
64715 + gr_conn_table[index] = NULL;
64716 + kfree(match);
64717 + }
64718 +
64719 + return;
64720 +}
64721 +
64722 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64723 + __u16 sport, __u16 dport)
64724 +{
64725 + struct conn_table_entry *match;
64726 + unsigned int index;
64727 +
64728 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64729 +
64730 + match = gr_conn_table[index];
64731 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64732 + match = match->next;
64733 +
64734 + if (match)
64735 + return match->sig;
64736 + else
64737 + return NULL;
64738 +}
64739 +
64740 +#endif
64741 +
64742 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64743 +{
64744 +#ifdef CONFIG_GRKERNSEC
64745 + struct signal_struct *sig = task->signal;
64746 + struct conn_table_entry *newent;
64747 +
64748 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64749 + if (newent == NULL)
64750 + return;
64751 + /* no bh lock needed since we are called with bh disabled */
64752 + spin_lock(&gr_conn_table_lock);
64753 + gr_del_task_from_ip_table_nolock(sig);
64754 + sig->gr_saddr = inet->rcv_saddr;
64755 + sig->gr_daddr = inet->daddr;
64756 + sig->gr_sport = inet->sport;
64757 + sig->gr_dport = inet->dport;
64758 + gr_add_to_task_ip_table_nolock(sig, newent);
64759 + spin_unlock(&gr_conn_table_lock);
64760 +#endif
64761 + return;
64762 +}
64763 +
64764 +void gr_del_task_from_ip_table(struct task_struct *task)
64765 +{
64766 +#ifdef CONFIG_GRKERNSEC
64767 + spin_lock_bh(&gr_conn_table_lock);
64768 + gr_del_task_from_ip_table_nolock(task->signal);
64769 + spin_unlock_bh(&gr_conn_table_lock);
64770 +#endif
64771 + return;
64772 +}
64773 +
64774 +void
64775 +gr_attach_curr_ip(const struct sock *sk)
64776 +{
64777 +#ifdef CONFIG_GRKERNSEC
64778 + struct signal_struct *p, *set;
64779 + const struct inet_sock *inet = inet_sk(sk);
64780 +
64781 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64782 + return;
64783 +
64784 + set = current->signal;
64785 +
64786 + spin_lock_bh(&gr_conn_table_lock);
64787 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64788 + inet->dport, inet->sport);
64789 + if (unlikely(p != NULL)) {
64790 + set->curr_ip = p->curr_ip;
64791 + set->used_accept = 1;
64792 + gr_del_task_from_ip_table_nolock(p);
64793 + spin_unlock_bh(&gr_conn_table_lock);
64794 + return;
64795 + }
64796 + spin_unlock_bh(&gr_conn_table_lock);
64797 +
64798 + set->curr_ip = inet->daddr;
64799 + set->used_accept = 1;
64800 +#endif
64801 + return;
64802 +}
64803 +
64804 +int
64805 +gr_handle_sock_all(const int family, const int type, const int protocol)
64806 +{
64807 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64808 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64809 + (family != AF_UNIX)) {
64810 + if (family == AF_INET)
64811 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64812 + else
64813 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64814 + return -EACCES;
64815 + }
64816 +#endif
64817 + return 0;
64818 +}
64819 +
64820 +int
64821 +gr_handle_sock_server(const struct sockaddr *sck)
64822 +{
64823 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64824 + if (grsec_enable_socket_server &&
64825 + in_group_p(grsec_socket_server_gid) &&
64826 + sck && (sck->sa_family != AF_UNIX) &&
64827 + (sck->sa_family != AF_LOCAL)) {
64828 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64829 + return -EACCES;
64830 + }
64831 +#endif
64832 + return 0;
64833 +}
64834 +
64835 +int
64836 +gr_handle_sock_server_other(const struct sock *sck)
64837 +{
64838 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64839 + if (grsec_enable_socket_server &&
64840 + in_group_p(grsec_socket_server_gid) &&
64841 + sck && (sck->sk_family != AF_UNIX) &&
64842 + (sck->sk_family != AF_LOCAL)) {
64843 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64844 + return -EACCES;
64845 + }
64846 +#endif
64847 + return 0;
64848 +}
64849 +
64850 +int
64851 +gr_handle_sock_client(const struct sockaddr *sck)
64852 +{
64853 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64854 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64855 + sck && (sck->sa_family != AF_UNIX) &&
64856 + (sck->sa_family != AF_LOCAL)) {
64857 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64858 + return -EACCES;
64859 + }
64860 +#endif
64861 + return 0;
64862 +}
64863 +
64864 +kernel_cap_t
64865 +gr_cap_rtnetlink(struct sock *sock)
64866 +{
64867 +#ifdef CONFIG_GRKERNSEC
64868 + if (!gr_acl_is_enabled())
64869 + return current_cap();
64870 + else if (sock->sk_protocol == NETLINK_ISCSI &&
64871 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64872 + gr_is_capable(CAP_SYS_ADMIN))
64873 + return current_cap();
64874 + else if (sock->sk_protocol == NETLINK_AUDIT &&
64875 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64876 + gr_is_capable(CAP_AUDIT_WRITE) &&
64877 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64878 + gr_is_capable(CAP_AUDIT_CONTROL))
64879 + return current_cap();
64880 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64881 + ((sock->sk_protocol == NETLINK_ROUTE) ?
64882 + gr_is_capable_nolog(CAP_NET_ADMIN) :
64883 + gr_is_capable(CAP_NET_ADMIN)))
64884 + return current_cap();
64885 + else
64886 + return __cap_empty_set;
64887 +#else
64888 + return current_cap();
64889 +#endif
64890 +}
64891 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64892 new file mode 100644
64893 index 0000000..31f3258
64894 --- /dev/null
64895 +++ b/grsecurity/grsec_sysctl.c
64896 @@ -0,0 +1,499 @@
64897 +#include <linux/kernel.h>
64898 +#include <linux/sched.h>
64899 +#include <linux/sysctl.h>
64900 +#include <linux/grsecurity.h>
64901 +#include <linux/grinternal.h>
64902 +
64903 +int
64904 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64905 +{
64906 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64907 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64908 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64909 + return -EACCES;
64910 + }
64911 +#endif
64912 + return 0;
64913 +}
64914 +
64915 +#ifdef CONFIG_GRKERNSEC_ROFS
64916 +static int __maybe_unused one = 1;
64917 +#endif
64918 +
64919 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64920 +ctl_table grsecurity_table[] = {
64921 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64922 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64923 +#ifdef CONFIG_GRKERNSEC_IO
64924 + {
64925 + .ctl_name = CTL_UNNUMBERED,
64926 + .procname = "disable_priv_io",
64927 + .data = &grsec_disable_privio,
64928 + .maxlen = sizeof(int),
64929 + .mode = 0600,
64930 + .proc_handler = &proc_dointvec,
64931 + },
64932 +#endif
64933 +#endif
64934 +#ifdef CONFIG_GRKERNSEC_LINK
64935 + {
64936 + .ctl_name = CTL_UNNUMBERED,
64937 + .procname = "linking_restrictions",
64938 + .data = &grsec_enable_link,
64939 + .maxlen = sizeof(int),
64940 + .mode = 0600,
64941 + .proc_handler = &proc_dointvec,
64942 + },
64943 +#endif
64944 +#ifdef CONFIG_GRKERNSEC_BRUTE
64945 + {
64946 + .ctl_name = CTL_UNNUMBERED,
64947 + .procname = "deter_bruteforce",
64948 + .data = &grsec_enable_brute,
64949 + .maxlen = sizeof(int),
64950 + .mode = 0600,
64951 + .proc_handler = &proc_dointvec,
64952 + },
64953 +#endif
64954 +#ifdef CONFIG_GRKERNSEC_FIFO
64955 + {
64956 + .ctl_name = CTL_UNNUMBERED,
64957 + .procname = "fifo_restrictions",
64958 + .data = &grsec_enable_fifo,
64959 + .maxlen = sizeof(int),
64960 + .mode = 0600,
64961 + .proc_handler = &proc_dointvec,
64962 + },
64963 +#endif
64964 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64965 + {
64966 + .ctl_name = CTL_UNNUMBERED,
64967 + .procname = "ptrace_readexec",
64968 + .data = &grsec_enable_ptrace_readexec,
64969 + .maxlen = sizeof(int),
64970 + .mode = 0600,
64971 + .proc_handler = &proc_dointvec,
64972 + },
64973 +#endif
64974 +#ifdef CONFIG_GRKERNSEC_SETXID
64975 + {
64976 + .ctl_name = CTL_UNNUMBERED,
64977 + .procname = "consistent_setxid",
64978 + .data = &grsec_enable_setxid,
64979 + .maxlen = sizeof(int),
64980 + .mode = 0600,
64981 + .proc_handler = &proc_dointvec,
64982 + },
64983 +#endif
64984 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64985 + {
64986 + .ctl_name = CTL_UNNUMBERED,
64987 + .procname = "ip_blackhole",
64988 + .data = &grsec_enable_blackhole,
64989 + .maxlen = sizeof(int),
64990 + .mode = 0600,
64991 + .proc_handler = &proc_dointvec,
64992 + },
64993 + {
64994 + .ctl_name = CTL_UNNUMBERED,
64995 + .procname = "lastack_retries",
64996 + .data = &grsec_lastack_retries,
64997 + .maxlen = sizeof(int),
64998 + .mode = 0600,
64999 + .proc_handler = &proc_dointvec,
65000 + },
65001 +#endif
65002 +#ifdef CONFIG_GRKERNSEC_EXECLOG
65003 + {
65004 + .ctl_name = CTL_UNNUMBERED,
65005 + .procname = "exec_logging",
65006 + .data = &grsec_enable_execlog,
65007 + .maxlen = sizeof(int),
65008 + .mode = 0600,
65009 + .proc_handler = &proc_dointvec,
65010 + },
65011 +#endif
65012 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65013 + {
65014 + .ctl_name = CTL_UNNUMBERED,
65015 + .procname = "rwxmap_logging",
65016 + .data = &grsec_enable_log_rwxmaps,
65017 + .maxlen = sizeof(int),
65018 + .mode = 0600,
65019 + .proc_handler = &proc_dointvec,
65020 + },
65021 +#endif
65022 +#ifdef CONFIG_GRKERNSEC_SIGNAL
65023 + {
65024 + .ctl_name = CTL_UNNUMBERED,
65025 + .procname = "signal_logging",
65026 + .data = &grsec_enable_signal,
65027 + .maxlen = sizeof(int),
65028 + .mode = 0600,
65029 + .proc_handler = &proc_dointvec,
65030 + },
65031 +#endif
65032 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
65033 + {
65034 + .ctl_name = CTL_UNNUMBERED,
65035 + .procname = "forkfail_logging",
65036 + .data = &grsec_enable_forkfail,
65037 + .maxlen = sizeof(int),
65038 + .mode = 0600,
65039 + .proc_handler = &proc_dointvec,
65040 + },
65041 +#endif
65042 +#ifdef CONFIG_GRKERNSEC_TIME
65043 + {
65044 + .ctl_name = CTL_UNNUMBERED,
65045 + .procname = "timechange_logging",
65046 + .data = &grsec_enable_time,
65047 + .maxlen = sizeof(int),
65048 + .mode = 0600,
65049 + .proc_handler = &proc_dointvec,
65050 + },
65051 +#endif
65052 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65053 + {
65054 + .ctl_name = CTL_UNNUMBERED,
65055 + .procname = "chroot_deny_shmat",
65056 + .data = &grsec_enable_chroot_shmat,
65057 + .maxlen = sizeof(int),
65058 + .mode = 0600,
65059 + .proc_handler = &proc_dointvec,
65060 + },
65061 +#endif
65062 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65063 + {
65064 + .ctl_name = CTL_UNNUMBERED,
65065 + .procname = "chroot_deny_unix",
65066 + .data = &grsec_enable_chroot_unix,
65067 + .maxlen = sizeof(int),
65068 + .mode = 0600,
65069 + .proc_handler = &proc_dointvec,
65070 + },
65071 +#endif
65072 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65073 + {
65074 + .ctl_name = CTL_UNNUMBERED,
65075 + .procname = "chroot_deny_mount",
65076 + .data = &grsec_enable_chroot_mount,
65077 + .maxlen = sizeof(int),
65078 + .mode = 0600,
65079 + .proc_handler = &proc_dointvec,
65080 + },
65081 +#endif
65082 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65083 + {
65084 + .ctl_name = CTL_UNNUMBERED,
65085 + .procname = "chroot_deny_fchdir",
65086 + .data = &grsec_enable_chroot_fchdir,
65087 + .maxlen = sizeof(int),
65088 + .mode = 0600,
65089 + .proc_handler = &proc_dointvec,
65090 + },
65091 +#endif
65092 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65093 + {
65094 + .ctl_name = CTL_UNNUMBERED,
65095 + .procname = "chroot_deny_chroot",
65096 + .data = &grsec_enable_chroot_double,
65097 + .maxlen = sizeof(int),
65098 + .mode = 0600,
65099 + .proc_handler = &proc_dointvec,
65100 + },
65101 +#endif
65102 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65103 + {
65104 + .ctl_name = CTL_UNNUMBERED,
65105 + .procname = "chroot_deny_pivot",
65106 + .data = &grsec_enable_chroot_pivot,
65107 + .maxlen = sizeof(int),
65108 + .mode = 0600,
65109 + .proc_handler = &proc_dointvec,
65110 + },
65111 +#endif
65112 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65113 + {
65114 + .ctl_name = CTL_UNNUMBERED,
65115 + .procname = "chroot_enforce_chdir",
65116 + .data = &grsec_enable_chroot_chdir,
65117 + .maxlen = sizeof(int),
65118 + .mode = 0600,
65119 + .proc_handler = &proc_dointvec,
65120 + },
65121 +#endif
65122 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65123 + {
65124 + .ctl_name = CTL_UNNUMBERED,
65125 + .procname = "chroot_deny_chmod",
65126 + .data = &grsec_enable_chroot_chmod,
65127 + .maxlen = sizeof(int),
65128 + .mode = 0600,
65129 + .proc_handler = &proc_dointvec,
65130 + },
65131 +#endif
65132 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65133 + {
65134 + .ctl_name = CTL_UNNUMBERED,
65135 + .procname = "chroot_deny_mknod",
65136 + .data = &grsec_enable_chroot_mknod,
65137 + .maxlen = sizeof(int),
65138 + .mode = 0600,
65139 + .proc_handler = &proc_dointvec,
65140 + },
65141 +#endif
65142 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65143 + {
65144 + .ctl_name = CTL_UNNUMBERED,
65145 + .procname = "chroot_restrict_nice",
65146 + .data = &grsec_enable_chroot_nice,
65147 + .maxlen = sizeof(int),
65148 + .mode = 0600,
65149 + .proc_handler = &proc_dointvec,
65150 + },
65151 +#endif
65152 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65153 + {
65154 + .ctl_name = CTL_UNNUMBERED,
65155 + .procname = "chroot_execlog",
65156 + .data = &grsec_enable_chroot_execlog,
65157 + .maxlen = sizeof(int),
65158 + .mode = 0600,
65159 + .proc_handler = &proc_dointvec,
65160 + },
65161 +#endif
65162 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65163 + {
65164 + .ctl_name = CTL_UNNUMBERED,
65165 + .procname = "chroot_caps",
65166 + .data = &grsec_enable_chroot_caps,
65167 + .maxlen = sizeof(int),
65168 + .mode = 0600,
65169 + .proc_handler = &proc_dointvec,
65170 + },
65171 +#endif
65172 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65173 + {
65174 + .ctl_name = CTL_UNNUMBERED,
65175 + .procname = "chroot_deny_sysctl",
65176 + .data = &grsec_enable_chroot_sysctl,
65177 + .maxlen = sizeof(int),
65178 + .mode = 0600,
65179 + .proc_handler = &proc_dointvec,
65180 + },
65181 +#endif
65182 +#ifdef CONFIG_GRKERNSEC_TPE
65183 + {
65184 + .ctl_name = CTL_UNNUMBERED,
65185 + .procname = "tpe",
65186 + .data = &grsec_enable_tpe,
65187 + .maxlen = sizeof(int),
65188 + .mode = 0600,
65189 + .proc_handler = &proc_dointvec,
65190 + },
65191 + {
65192 + .ctl_name = CTL_UNNUMBERED,
65193 + .procname = "tpe_gid",
65194 + .data = &grsec_tpe_gid,
65195 + .maxlen = sizeof(int),
65196 + .mode = 0600,
65197 + .proc_handler = &proc_dointvec,
65198 + },
65199 +#endif
65200 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65201 + {
65202 + .ctl_name = CTL_UNNUMBERED,
65203 + .procname = "tpe_invert",
65204 + .data = &grsec_enable_tpe_invert,
65205 + .maxlen = sizeof(int),
65206 + .mode = 0600,
65207 + .proc_handler = &proc_dointvec,
65208 + },
65209 +#endif
65210 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65211 + {
65212 + .ctl_name = CTL_UNNUMBERED,
65213 + .procname = "tpe_restrict_all",
65214 + .data = &grsec_enable_tpe_all,
65215 + .maxlen = sizeof(int),
65216 + .mode = 0600,
65217 + .proc_handler = &proc_dointvec,
65218 + },
65219 +#endif
65220 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65221 + {
65222 + .ctl_name = CTL_UNNUMBERED,
65223 + .procname = "socket_all",
65224 + .data = &grsec_enable_socket_all,
65225 + .maxlen = sizeof(int),
65226 + .mode = 0600,
65227 + .proc_handler = &proc_dointvec,
65228 + },
65229 + {
65230 + .ctl_name = CTL_UNNUMBERED,
65231 + .procname = "socket_all_gid",
65232 + .data = &grsec_socket_all_gid,
65233 + .maxlen = sizeof(int),
65234 + .mode = 0600,
65235 + .proc_handler = &proc_dointvec,
65236 + },
65237 +#endif
65238 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65239 + {
65240 + .ctl_name = CTL_UNNUMBERED,
65241 + .procname = "socket_client",
65242 + .data = &grsec_enable_socket_client,
65243 + .maxlen = sizeof(int),
65244 + .mode = 0600,
65245 + .proc_handler = &proc_dointvec,
65246 + },
65247 + {
65248 + .ctl_name = CTL_UNNUMBERED,
65249 + .procname = "socket_client_gid",
65250 + .data = &grsec_socket_client_gid,
65251 + .maxlen = sizeof(int),
65252 + .mode = 0600,
65253 + .proc_handler = &proc_dointvec,
65254 + },
65255 +#endif
65256 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65257 + {
65258 + .ctl_name = CTL_UNNUMBERED,
65259 + .procname = "socket_server",
65260 + .data = &grsec_enable_socket_server,
65261 + .maxlen = sizeof(int),
65262 + .mode = 0600,
65263 + .proc_handler = &proc_dointvec,
65264 + },
65265 + {
65266 + .ctl_name = CTL_UNNUMBERED,
65267 + .procname = "socket_server_gid",
65268 + .data = &grsec_socket_server_gid,
65269 + .maxlen = sizeof(int),
65270 + .mode = 0600,
65271 + .proc_handler = &proc_dointvec,
65272 + },
65273 +#endif
65274 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65275 + {
65276 + .ctl_name = CTL_UNNUMBERED,
65277 + .procname = "audit_group",
65278 + .data = &grsec_enable_group,
65279 + .maxlen = sizeof(int),
65280 + .mode = 0600,
65281 + .proc_handler = &proc_dointvec,
65282 + },
65283 + {
65284 + .ctl_name = CTL_UNNUMBERED,
65285 + .procname = "audit_gid",
65286 + .data = &grsec_audit_gid,
65287 + .maxlen = sizeof(int),
65288 + .mode = 0600,
65289 + .proc_handler = &proc_dointvec,
65290 + },
65291 +#endif
65292 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65293 + {
65294 + .ctl_name = CTL_UNNUMBERED,
65295 + .procname = "audit_chdir",
65296 + .data = &grsec_enable_chdir,
65297 + .maxlen = sizeof(int),
65298 + .mode = 0600,
65299 + .proc_handler = &proc_dointvec,
65300 + },
65301 +#endif
65302 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65303 + {
65304 + .ctl_name = CTL_UNNUMBERED,
65305 + .procname = "audit_mount",
65306 + .data = &grsec_enable_mount,
65307 + .maxlen = sizeof(int),
65308 + .mode = 0600,
65309 + .proc_handler = &proc_dointvec,
65310 + },
65311 +#endif
65312 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65313 + {
65314 + .ctl_name = CTL_UNNUMBERED,
65315 + .procname = "audit_textrel",
65316 + .data = &grsec_enable_audit_textrel,
65317 + .maxlen = sizeof(int),
65318 + .mode = 0600,
65319 + .proc_handler = &proc_dointvec,
65320 + },
65321 +#endif
65322 +#ifdef CONFIG_GRKERNSEC_DMESG
65323 + {
65324 + .ctl_name = CTL_UNNUMBERED,
65325 + .procname = "dmesg",
65326 + .data = &grsec_enable_dmesg,
65327 + .maxlen = sizeof(int),
65328 + .mode = 0600,
65329 + .proc_handler = &proc_dointvec,
65330 + },
65331 +#endif
65332 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65333 + {
65334 + .ctl_name = CTL_UNNUMBERED,
65335 + .procname = "chroot_findtask",
65336 + .data = &grsec_enable_chroot_findtask,
65337 + .maxlen = sizeof(int),
65338 + .mode = 0600,
65339 + .proc_handler = &proc_dointvec,
65340 + },
65341 +#endif
65342 +#ifdef CONFIG_GRKERNSEC_RESLOG
65343 + {
65344 + .ctl_name = CTL_UNNUMBERED,
65345 + .procname = "resource_logging",
65346 + .data = &grsec_resource_logging,
65347 + .maxlen = sizeof(int),
65348 + .mode = 0600,
65349 + .proc_handler = &proc_dointvec,
65350 + },
65351 +#endif
65352 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65353 + {
65354 + .ctl_name = CTL_UNNUMBERED,
65355 + .procname = "audit_ptrace",
65356 + .data = &grsec_enable_audit_ptrace,
65357 + .maxlen = sizeof(int),
65358 + .mode = 0600,
65359 + .proc_handler = &proc_dointvec,
65360 + },
65361 +#endif
65362 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65363 + {
65364 + .ctl_name = CTL_UNNUMBERED,
65365 + .procname = "harden_ptrace",
65366 + .data = &grsec_enable_harden_ptrace,
65367 + .maxlen = sizeof(int),
65368 + .mode = 0600,
65369 + .proc_handler = &proc_dointvec,
65370 + },
65371 +#endif
65372 + {
65373 + .ctl_name = CTL_UNNUMBERED,
65374 + .procname = "grsec_lock",
65375 + .data = &grsec_lock,
65376 + .maxlen = sizeof(int),
65377 + .mode = 0600,
65378 + .proc_handler = &proc_dointvec,
65379 + },
65380 +#endif
65381 +#ifdef CONFIG_GRKERNSEC_ROFS
65382 + {
65383 + .ctl_name = CTL_UNNUMBERED,
65384 + .procname = "romount_protect",
65385 + .data = &grsec_enable_rofs,
65386 + .maxlen = sizeof(int),
65387 + .mode = 0600,
65388 + .proc_handler = &proc_dointvec_minmax,
65389 + .extra1 = &one,
65390 + .extra2 = &one,
65391 + },
65392 +#endif
65393 + { .ctl_name = 0 }
65394 +};
65395 +#endif
65396 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65397 new file mode 100644
65398 index 0000000..0dc13c3
65399 --- /dev/null
65400 +++ b/grsecurity/grsec_time.c
65401 @@ -0,0 +1,16 @@
65402 +#include <linux/kernel.h>
65403 +#include <linux/sched.h>
65404 +#include <linux/grinternal.h>
65405 +#include <linux/module.h>
65406 +
65407 +void
65408 +gr_log_timechange(void)
65409 +{
65410 +#ifdef CONFIG_GRKERNSEC_TIME
65411 + if (grsec_enable_time)
65412 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65413 +#endif
65414 + return;
65415 +}
65416 +
65417 +EXPORT_SYMBOL(gr_log_timechange);
65418 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65419 new file mode 100644
65420 index 0000000..07e0dc0
65421 --- /dev/null
65422 +++ b/grsecurity/grsec_tpe.c
65423 @@ -0,0 +1,73 @@
65424 +#include <linux/kernel.h>
65425 +#include <linux/sched.h>
65426 +#include <linux/file.h>
65427 +#include <linux/fs.h>
65428 +#include <linux/grinternal.h>
65429 +
65430 +extern int gr_acl_tpe_check(void);
65431 +
65432 +int
65433 +gr_tpe_allow(const struct file *file)
65434 +{
65435 +#ifdef CONFIG_GRKERNSEC
65436 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65437 + const struct cred *cred = current_cred();
65438 + char *msg = NULL;
65439 + char *msg2 = NULL;
65440 +
65441 + // never restrict root
65442 + if (!cred->uid)
65443 + return 1;
65444 +
65445 + if (grsec_enable_tpe) {
65446 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65447 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65448 + msg = "not being in trusted group";
65449 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65450 + msg = "being in untrusted group";
65451 +#else
65452 + if (in_group_p(grsec_tpe_gid))
65453 + msg = "being in untrusted group";
65454 +#endif
65455 + }
65456 + if (!msg && gr_acl_tpe_check())
65457 + msg = "being in untrusted role";
65458 +
65459 + // not in any affected group/role
65460 + if (!msg)
65461 + goto next_check;
65462 +
65463 + if (inode->i_uid)
65464 + msg2 = "file in non-root-owned directory";
65465 + else if (inode->i_mode & S_IWOTH)
65466 + msg2 = "file in world-writable directory";
65467 + else if (inode->i_mode & S_IWGRP)
65468 + msg2 = "file in group-writable directory";
65469 +
65470 + if (msg && msg2) {
65471 + char fullmsg[70] = {0};
65472 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65473 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65474 + return 0;
65475 + }
65476 + msg = NULL;
65477 +next_check:
65478 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65479 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65480 + return 1;
65481 +
65482 + if (inode->i_uid && (inode->i_uid != cred->uid))
65483 + msg = "directory not owned by user";
65484 + else if (inode->i_mode & S_IWOTH)
65485 + msg = "file in world-writable directory";
65486 + else if (inode->i_mode & S_IWGRP)
65487 + msg = "file in group-writable directory";
65488 +
65489 + if (msg) {
65490 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65491 + return 0;
65492 + }
65493 +#endif
65494 +#endif
65495 + return 1;
65496 +}
65497 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65498 new file mode 100644
65499 index 0000000..9f7b1ac
65500 --- /dev/null
65501 +++ b/grsecurity/grsum.c
65502 @@ -0,0 +1,61 @@
65503 +#include <linux/err.h>
65504 +#include <linux/kernel.h>
65505 +#include <linux/sched.h>
65506 +#include <linux/mm.h>
65507 +#include <linux/scatterlist.h>
65508 +#include <linux/crypto.h>
65509 +#include <linux/gracl.h>
65510 +
65511 +
65512 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65513 +#error "crypto and sha256 must be built into the kernel"
65514 +#endif
65515 +
65516 +int
65517 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65518 +{
65519 + char *p;
65520 + struct crypto_hash *tfm;
65521 + struct hash_desc desc;
65522 + struct scatterlist sg;
65523 + unsigned char temp_sum[GR_SHA_LEN];
65524 + volatile int retval = 0;
65525 + volatile int dummy = 0;
65526 + unsigned int i;
65527 +
65528 + sg_init_table(&sg, 1);
65529 +
65530 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65531 + if (IS_ERR(tfm)) {
65532 + /* should never happen, since sha256 should be built in */
65533 + return 1;
65534 + }
65535 +
65536 + desc.tfm = tfm;
65537 + desc.flags = 0;
65538 +
65539 + crypto_hash_init(&desc);
65540 +
65541 + p = salt;
65542 + sg_set_buf(&sg, p, GR_SALT_LEN);
65543 + crypto_hash_update(&desc, &sg, sg.length);
65544 +
65545 + p = entry->pw;
65546 + sg_set_buf(&sg, p, strlen(p));
65547 +
65548 + crypto_hash_update(&desc, &sg, sg.length);
65549 +
65550 + crypto_hash_final(&desc, temp_sum);
65551 +
65552 + memset(entry->pw, 0, GR_PW_LEN);
65553 +
65554 + for (i = 0; i < GR_SHA_LEN; i++)
65555 + if (sum[i] != temp_sum[i])
65556 + retval = 1;
65557 + else
65558 + dummy = 1; // waste a cycle
65559 +
65560 + crypto_free_hash(tfm);
65561 +
65562 + return retval;
65563 +}
65564 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65565 index 3cd9ccd..fe16d47 100644
65566 --- a/include/acpi/acpi_bus.h
65567 +++ b/include/acpi/acpi_bus.h
65568 @@ -107,7 +107,7 @@ struct acpi_device_ops {
65569 acpi_op_bind bind;
65570 acpi_op_unbind unbind;
65571 acpi_op_notify notify;
65572 -};
65573 +} __no_const;
65574
65575 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65576
65577 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65578 index f4906f6..71feb73 100644
65579 --- a/include/acpi/acpi_drivers.h
65580 +++ b/include/acpi/acpi_drivers.h
65581 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65582 Dock Station
65583 -------------------------------------------------------------------------- */
65584 struct acpi_dock_ops {
65585 - acpi_notify_handler handler;
65586 - acpi_notify_handler uevent;
65587 + const acpi_notify_handler handler;
65588 + const acpi_notify_handler uevent;
65589 };
65590
65591 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65592 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65593 extern int register_dock_notifier(struct notifier_block *nb);
65594 extern void unregister_dock_notifier(struct notifier_block *nb);
65595 extern int register_hotplug_dock_device(acpi_handle handle,
65596 - struct acpi_dock_ops *ops,
65597 + const struct acpi_dock_ops *ops,
65598 void *context);
65599 extern void unregister_hotplug_dock_device(acpi_handle handle);
65600 #else
65601 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65602 {
65603 }
65604 static inline int register_hotplug_dock_device(acpi_handle handle,
65605 - struct acpi_dock_ops *ops,
65606 + const struct acpi_dock_ops *ops,
65607 void *context)
65608 {
65609 return -ENODEV;
65610 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65611 index b7babf0..a9ac9fc 100644
65612 --- a/include/asm-generic/atomic-long.h
65613 +++ b/include/asm-generic/atomic-long.h
65614 @@ -22,6 +22,12 @@
65615
65616 typedef atomic64_t atomic_long_t;
65617
65618 +#ifdef CONFIG_PAX_REFCOUNT
65619 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
65620 +#else
65621 +typedef atomic64_t atomic_long_unchecked_t;
65622 +#endif
65623 +
65624 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65625
65626 static inline long atomic_long_read(atomic_long_t *l)
65627 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65628 return (long)atomic64_read(v);
65629 }
65630
65631 +#ifdef CONFIG_PAX_REFCOUNT
65632 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65633 +{
65634 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65635 +
65636 + return (long)atomic64_read_unchecked(v);
65637 +}
65638 +#endif
65639 +
65640 static inline void atomic_long_set(atomic_long_t *l, long i)
65641 {
65642 atomic64_t *v = (atomic64_t *)l;
65643 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65644 atomic64_set(v, i);
65645 }
65646
65647 +#ifdef CONFIG_PAX_REFCOUNT
65648 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65649 +{
65650 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65651 +
65652 + atomic64_set_unchecked(v, i);
65653 +}
65654 +#endif
65655 +
65656 static inline void atomic_long_inc(atomic_long_t *l)
65657 {
65658 atomic64_t *v = (atomic64_t *)l;
65659 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65660 atomic64_inc(v);
65661 }
65662
65663 +#ifdef CONFIG_PAX_REFCOUNT
65664 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65665 +{
65666 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65667 +
65668 + atomic64_inc_unchecked(v);
65669 +}
65670 +#endif
65671 +
65672 static inline void atomic_long_dec(atomic_long_t *l)
65673 {
65674 atomic64_t *v = (atomic64_t *)l;
65675 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65676 atomic64_dec(v);
65677 }
65678
65679 +#ifdef CONFIG_PAX_REFCOUNT
65680 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65681 +{
65682 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65683 +
65684 + atomic64_dec_unchecked(v);
65685 +}
65686 +#endif
65687 +
65688 static inline void atomic_long_add(long i, atomic_long_t *l)
65689 {
65690 atomic64_t *v = (atomic64_t *)l;
65691 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65692 atomic64_add(i, v);
65693 }
65694
65695 +#ifdef CONFIG_PAX_REFCOUNT
65696 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65697 +{
65698 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65699 +
65700 + atomic64_add_unchecked(i, v);
65701 +}
65702 +#endif
65703 +
65704 static inline void atomic_long_sub(long i, atomic_long_t *l)
65705 {
65706 atomic64_t *v = (atomic64_t *)l;
65707 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65708 return (long)atomic64_inc_return(v);
65709 }
65710
65711 +#ifdef CONFIG_PAX_REFCOUNT
65712 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65713 +{
65714 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65715 +
65716 + return (long)atomic64_inc_return_unchecked(v);
65717 +}
65718 +#endif
65719 +
65720 static inline long atomic_long_dec_return(atomic_long_t *l)
65721 {
65722 atomic64_t *v = (atomic64_t *)l;
65723 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65724
65725 typedef atomic_t atomic_long_t;
65726
65727 +#ifdef CONFIG_PAX_REFCOUNT
65728 +typedef atomic_unchecked_t atomic_long_unchecked_t;
65729 +#else
65730 +typedef atomic_t atomic_long_unchecked_t;
65731 +#endif
65732 +
65733 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65734 static inline long atomic_long_read(atomic_long_t *l)
65735 {
65736 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65737 return (long)atomic_read(v);
65738 }
65739
65740 +#ifdef CONFIG_PAX_REFCOUNT
65741 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65742 +{
65743 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65744 +
65745 + return (long)atomic_read_unchecked(v);
65746 +}
65747 +#endif
65748 +
65749 static inline void atomic_long_set(atomic_long_t *l, long i)
65750 {
65751 atomic_t *v = (atomic_t *)l;
65752 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65753 atomic_set(v, i);
65754 }
65755
65756 +#ifdef CONFIG_PAX_REFCOUNT
65757 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65758 +{
65759 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65760 +
65761 + atomic_set_unchecked(v, i);
65762 +}
65763 +#endif
65764 +
65765 static inline void atomic_long_inc(atomic_long_t *l)
65766 {
65767 atomic_t *v = (atomic_t *)l;
65768 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65769 atomic_inc(v);
65770 }
65771
65772 +#ifdef CONFIG_PAX_REFCOUNT
65773 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65774 +{
65775 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65776 +
65777 + atomic_inc_unchecked(v);
65778 +}
65779 +#endif
65780 +
65781 static inline void atomic_long_dec(atomic_long_t *l)
65782 {
65783 atomic_t *v = (atomic_t *)l;
65784 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65785 atomic_dec(v);
65786 }
65787
65788 +#ifdef CONFIG_PAX_REFCOUNT
65789 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65790 +{
65791 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65792 +
65793 + atomic_dec_unchecked(v);
65794 +}
65795 +#endif
65796 +
65797 static inline void atomic_long_add(long i, atomic_long_t *l)
65798 {
65799 atomic_t *v = (atomic_t *)l;
65800 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65801 atomic_add(i, v);
65802 }
65803
65804 +#ifdef CONFIG_PAX_REFCOUNT
65805 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65806 +{
65807 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65808 +
65809 + atomic_add_unchecked(i, v);
65810 +}
65811 +#endif
65812 +
65813 static inline void atomic_long_sub(long i, atomic_long_t *l)
65814 {
65815 atomic_t *v = (atomic_t *)l;
65816 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65817 return (long)atomic_inc_return(v);
65818 }
65819
65820 +#ifdef CONFIG_PAX_REFCOUNT
65821 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65822 +{
65823 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65824 +
65825 + return (long)atomic_inc_return_unchecked(v);
65826 +}
65827 +#endif
65828 +
65829 static inline long atomic_long_dec_return(atomic_long_t *l)
65830 {
65831 atomic_t *v = (atomic_t *)l;
65832 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65833
65834 #endif /* BITS_PER_LONG == 64 */
65835
65836 +#ifdef CONFIG_PAX_REFCOUNT
65837 +static inline void pax_refcount_needs_these_functions(void)
65838 +{
65839 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
65840 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65841 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65842 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65843 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65844 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65845 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65846 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65847 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65848 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65849 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65850 +
65851 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65852 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65853 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65854 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65855 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65856 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65857 +}
65858 +#else
65859 +#define atomic_read_unchecked(v) atomic_read(v)
65860 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65861 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65862 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65863 +#define atomic_inc_unchecked(v) atomic_inc(v)
65864 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65865 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65866 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65867 +#define atomic_dec_unchecked(v) atomic_dec(v)
65868 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65869 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65870 +
65871 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
65872 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65873 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65874 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65875 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65876 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65877 +#endif
65878 +
65879 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65880 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65881 index b18ce4f..2ee2843 100644
65882 --- a/include/asm-generic/atomic64.h
65883 +++ b/include/asm-generic/atomic64.h
65884 @@ -16,6 +16,8 @@ typedef struct {
65885 long long counter;
65886 } atomic64_t;
65887
65888 +typedef atomic64_t atomic64_unchecked_t;
65889 +
65890 #define ATOMIC64_INIT(i) { (i) }
65891
65892 extern long long atomic64_read(const atomic64_t *v);
65893 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65894 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65895 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65896
65897 +#define atomic64_read_unchecked(v) atomic64_read(v)
65898 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65899 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65900 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65901 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65902 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
65903 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65904 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
65905 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65906 +
65907 #endif /* _ASM_GENERIC_ATOMIC64_H */
65908 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65909 index d48ddf0..656a0ac 100644
65910 --- a/include/asm-generic/bug.h
65911 +++ b/include/asm-generic/bug.h
65912 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65913
65914 #else /* !CONFIG_BUG */
65915 #ifndef HAVE_ARCH_BUG
65916 -#define BUG() do {} while(0)
65917 +#define BUG() do { for (;;) ; } while(0)
65918 #endif
65919
65920 #ifndef HAVE_ARCH_BUG_ON
65921 -#define BUG_ON(condition) do { if (condition) ; } while(0)
65922 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65923 #endif
65924
65925 #ifndef HAVE_ARCH_WARN_ON
65926 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65927 index 1bfcfe5..e04c5c9 100644
65928 --- a/include/asm-generic/cache.h
65929 +++ b/include/asm-generic/cache.h
65930 @@ -6,7 +6,7 @@
65931 * cache lines need to provide their own cache.h.
65932 */
65933
65934 -#define L1_CACHE_SHIFT 5
65935 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65936 +#define L1_CACHE_SHIFT 5UL
65937 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65938
65939 #endif /* __ASM_GENERIC_CACHE_H */
65940 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65941 index 6920695..41038bc 100644
65942 --- a/include/asm-generic/dma-mapping-common.h
65943 +++ b/include/asm-generic/dma-mapping-common.h
65944 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65945 enum dma_data_direction dir,
65946 struct dma_attrs *attrs)
65947 {
65948 - struct dma_map_ops *ops = get_dma_ops(dev);
65949 + const struct dma_map_ops *ops = get_dma_ops(dev);
65950 dma_addr_t addr;
65951
65952 kmemcheck_mark_initialized(ptr, size);
65953 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65954 enum dma_data_direction dir,
65955 struct dma_attrs *attrs)
65956 {
65957 - struct dma_map_ops *ops = get_dma_ops(dev);
65958 + const struct dma_map_ops *ops = get_dma_ops(dev);
65959
65960 BUG_ON(!valid_dma_direction(dir));
65961 if (ops->unmap_page)
65962 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65963 int nents, enum dma_data_direction dir,
65964 struct dma_attrs *attrs)
65965 {
65966 - struct dma_map_ops *ops = get_dma_ops(dev);
65967 + const struct dma_map_ops *ops = get_dma_ops(dev);
65968 int i, ents;
65969 struct scatterlist *s;
65970
65971 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65972 int nents, enum dma_data_direction dir,
65973 struct dma_attrs *attrs)
65974 {
65975 - struct dma_map_ops *ops = get_dma_ops(dev);
65976 + const struct dma_map_ops *ops = get_dma_ops(dev);
65977
65978 BUG_ON(!valid_dma_direction(dir));
65979 debug_dma_unmap_sg(dev, sg, nents, dir);
65980 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65981 size_t offset, size_t size,
65982 enum dma_data_direction dir)
65983 {
65984 - struct dma_map_ops *ops = get_dma_ops(dev);
65985 + const struct dma_map_ops *ops = get_dma_ops(dev);
65986 dma_addr_t addr;
65987
65988 kmemcheck_mark_initialized(page_address(page) + offset, size);
65989 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65990 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65991 size_t size, enum dma_data_direction dir)
65992 {
65993 - struct dma_map_ops *ops = get_dma_ops(dev);
65994 + const struct dma_map_ops *ops = get_dma_ops(dev);
65995
65996 BUG_ON(!valid_dma_direction(dir));
65997 if (ops->unmap_page)
65998 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65999 size_t size,
66000 enum dma_data_direction dir)
66001 {
66002 - struct dma_map_ops *ops = get_dma_ops(dev);
66003 + const struct dma_map_ops *ops = get_dma_ops(dev);
66004
66005 BUG_ON(!valid_dma_direction(dir));
66006 if (ops->sync_single_for_cpu)
66007 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
66008 dma_addr_t addr, size_t size,
66009 enum dma_data_direction dir)
66010 {
66011 - struct dma_map_ops *ops = get_dma_ops(dev);
66012 + const struct dma_map_ops *ops = get_dma_ops(dev);
66013
66014 BUG_ON(!valid_dma_direction(dir));
66015 if (ops->sync_single_for_device)
66016 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
66017 size_t size,
66018 enum dma_data_direction dir)
66019 {
66020 - struct dma_map_ops *ops = get_dma_ops(dev);
66021 + const struct dma_map_ops *ops = get_dma_ops(dev);
66022
66023 BUG_ON(!valid_dma_direction(dir));
66024 if (ops->sync_single_range_for_cpu) {
66025 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
66026 size_t size,
66027 enum dma_data_direction dir)
66028 {
66029 - struct dma_map_ops *ops = get_dma_ops(dev);
66030 + const struct dma_map_ops *ops = get_dma_ops(dev);
66031
66032 BUG_ON(!valid_dma_direction(dir));
66033 if (ops->sync_single_range_for_device) {
66034 @@ -155,7 +155,7 @@ static inline void
66035 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
66036 int nelems, enum dma_data_direction dir)
66037 {
66038 - struct dma_map_ops *ops = get_dma_ops(dev);
66039 + const struct dma_map_ops *ops = get_dma_ops(dev);
66040
66041 BUG_ON(!valid_dma_direction(dir));
66042 if (ops->sync_sg_for_cpu)
66043 @@ -167,7 +167,7 @@ static inline void
66044 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
66045 int nelems, enum dma_data_direction dir)
66046 {
66047 - struct dma_map_ops *ops = get_dma_ops(dev);
66048 + const struct dma_map_ops *ops = get_dma_ops(dev);
66049
66050 BUG_ON(!valid_dma_direction(dir));
66051 if (ops->sync_sg_for_device)
66052 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
66053 index 0d68a1e..b74a761 100644
66054 --- a/include/asm-generic/emergency-restart.h
66055 +++ b/include/asm-generic/emergency-restart.h
66056 @@ -1,7 +1,7 @@
66057 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
66058 #define _ASM_GENERIC_EMERGENCY_RESTART_H
66059
66060 -static inline void machine_emergency_restart(void)
66061 +static inline __noreturn void machine_emergency_restart(void)
66062 {
66063 machine_restart(NULL);
66064 }
66065 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
66066 index 3c2344f..4590a7d 100644
66067 --- a/include/asm-generic/futex.h
66068 +++ b/include/asm-generic/futex.h
66069 @@ -6,7 +6,7 @@
66070 #include <asm/errno.h>
66071
66072 static inline int
66073 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66074 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
66075 {
66076 int op = (encoded_op >> 28) & 7;
66077 int cmp = (encoded_op >> 24) & 15;
66078 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66079 }
66080
66081 static inline int
66082 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
66083 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
66084 {
66085 return -ENOSYS;
66086 }
66087 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
66088 index 1ca3efc..e3dc852 100644
66089 --- a/include/asm-generic/int-l64.h
66090 +++ b/include/asm-generic/int-l64.h
66091 @@ -46,6 +46,8 @@ typedef unsigned int u32;
66092 typedef signed long s64;
66093 typedef unsigned long u64;
66094
66095 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
66096 +
66097 #define S8_C(x) x
66098 #define U8_C(x) x ## U
66099 #define S16_C(x) x
66100 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
66101 index f394147..b6152b9 100644
66102 --- a/include/asm-generic/int-ll64.h
66103 +++ b/include/asm-generic/int-ll64.h
66104 @@ -51,6 +51,8 @@ typedef unsigned int u32;
66105 typedef signed long long s64;
66106 typedef unsigned long long u64;
66107
66108 +typedef unsigned long long intoverflow_t;
66109 +
66110 #define S8_C(x) x
66111 #define U8_C(x) x ## U
66112 #define S16_C(x) x
66113 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
66114 index e5f234a..cdb16b3 100644
66115 --- a/include/asm-generic/kmap_types.h
66116 +++ b/include/asm-generic/kmap_types.h
66117 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
66118 KMAP_D(16) KM_IRQ_PTE,
66119 KMAP_D(17) KM_NMI,
66120 KMAP_D(18) KM_NMI_PTE,
66121 -KMAP_D(19) KM_TYPE_NR
66122 +KMAP_D(19) KM_CLEARPAGE,
66123 +KMAP_D(20) KM_TYPE_NR
66124 };
66125
66126 #undef KMAP_D
66127 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66128 index 725612b..9cc513a 100644
66129 --- a/include/asm-generic/pgtable-nopmd.h
66130 +++ b/include/asm-generic/pgtable-nopmd.h
66131 @@ -1,14 +1,19 @@
66132 #ifndef _PGTABLE_NOPMD_H
66133 #define _PGTABLE_NOPMD_H
66134
66135 -#ifndef __ASSEMBLY__
66136 -
66137 #include <asm-generic/pgtable-nopud.h>
66138
66139 -struct mm_struct;
66140 -
66141 #define __PAGETABLE_PMD_FOLDED
66142
66143 +#define PMD_SHIFT PUD_SHIFT
66144 +#define PTRS_PER_PMD 1
66145 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66146 +#define PMD_MASK (~(PMD_SIZE-1))
66147 +
66148 +#ifndef __ASSEMBLY__
66149 +
66150 +struct mm_struct;
66151 +
66152 /*
66153 * Having the pmd type consist of a pud gets the size right, and allows
66154 * us to conceptually access the pud entry that this pmd is folded into
66155 @@ -16,11 +21,6 @@ struct mm_struct;
66156 */
66157 typedef struct { pud_t pud; } pmd_t;
66158
66159 -#define PMD_SHIFT PUD_SHIFT
66160 -#define PTRS_PER_PMD 1
66161 -#define PMD_SIZE (1UL << PMD_SHIFT)
66162 -#define PMD_MASK (~(PMD_SIZE-1))
66163 -
66164 /*
66165 * The "pud_xxx()" functions here are trivial for a folded two-level
66166 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66167 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66168 index 810431d..ccc3638 100644
66169 --- a/include/asm-generic/pgtable-nopud.h
66170 +++ b/include/asm-generic/pgtable-nopud.h
66171 @@ -1,10 +1,15 @@
66172 #ifndef _PGTABLE_NOPUD_H
66173 #define _PGTABLE_NOPUD_H
66174
66175 -#ifndef __ASSEMBLY__
66176 -
66177 #define __PAGETABLE_PUD_FOLDED
66178
66179 +#define PUD_SHIFT PGDIR_SHIFT
66180 +#define PTRS_PER_PUD 1
66181 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66182 +#define PUD_MASK (~(PUD_SIZE-1))
66183 +
66184 +#ifndef __ASSEMBLY__
66185 +
66186 /*
66187 * Having the pud type consist of a pgd gets the size right, and allows
66188 * us to conceptually access the pgd entry that this pud is folded into
66189 @@ -12,11 +17,6 @@
66190 */
66191 typedef struct { pgd_t pgd; } pud_t;
66192
66193 -#define PUD_SHIFT PGDIR_SHIFT
66194 -#define PTRS_PER_PUD 1
66195 -#define PUD_SIZE (1UL << PUD_SHIFT)
66196 -#define PUD_MASK (~(PUD_SIZE-1))
66197 -
66198 /*
66199 * The "pgd_xxx()" functions here are trivial for a folded two-level
66200 * setup: the pud is never bad, and a pud always exists (as it's folded
66201 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66202 index e2bd73e..fea8ed3 100644
66203 --- a/include/asm-generic/pgtable.h
66204 +++ b/include/asm-generic/pgtable.h
66205 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
66206 unsigned long size);
66207 #endif
66208
66209 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66210 +static inline unsigned long pax_open_kernel(void) { return 0; }
66211 +#endif
66212 +
66213 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66214 +static inline unsigned long pax_close_kernel(void) { return 0; }
66215 +#endif
66216 +
66217 #endif /* !__ASSEMBLY__ */
66218
66219 #endif /* _ASM_GENERIC_PGTABLE_H */
66220 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66221 index b6e818f..21aa58a 100644
66222 --- a/include/asm-generic/vmlinux.lds.h
66223 +++ b/include/asm-generic/vmlinux.lds.h
66224 @@ -199,6 +199,7 @@
66225 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66226 VMLINUX_SYMBOL(__start_rodata) = .; \
66227 *(.rodata) *(.rodata.*) \
66228 + *(.data.read_only) \
66229 *(__vermagic) /* Kernel version magic */ \
66230 *(__markers_strings) /* Markers: strings */ \
66231 *(__tracepoints_strings)/* Tracepoints: strings */ \
66232 @@ -656,22 +657,24 @@
66233 * section in the linker script will go there too. @phdr should have
66234 * a leading colon.
66235 *
66236 - * Note that this macros defines __per_cpu_load as an absolute symbol.
66237 + * Note that this macros defines per_cpu_load as an absolute symbol.
66238 * If there is no need to put the percpu section at a predetermined
66239 * address, use PERCPU().
66240 */
66241 #define PERCPU_VADDR(vaddr, phdr) \
66242 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
66243 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66244 + per_cpu_load = .; \
66245 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66246 - LOAD_OFFSET) { \
66247 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66248 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66249 *(.data.percpu.first) \
66250 - *(.data.percpu.page_aligned) \
66251 *(.data.percpu) \
66252 + . = ALIGN(PAGE_SIZE); \
66253 + *(.data.percpu.page_aligned) \
66254 *(.data.percpu.shared_aligned) \
66255 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66256 } phdr \
66257 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66258 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66259
66260 /**
66261 * PERCPU - define output section for percpu area, simple version
66262 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66263 index ebab6a6..351dba1 100644
66264 --- a/include/drm/drmP.h
66265 +++ b/include/drm/drmP.h
66266 @@ -71,6 +71,7 @@
66267 #include <linux/workqueue.h>
66268 #include <linux/poll.h>
66269 #include <asm/pgalloc.h>
66270 +#include <asm/local.h>
66271 #include "drm.h"
66272
66273 #include <linux/idr.h>
66274 @@ -814,7 +815,7 @@ struct drm_driver {
66275 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66276
66277 /* Driver private ops for this object */
66278 - struct vm_operations_struct *gem_vm_ops;
66279 + const struct vm_operations_struct *gem_vm_ops;
66280
66281 int major;
66282 int minor;
66283 @@ -917,7 +918,7 @@ struct drm_device {
66284
66285 /** \name Usage Counters */
66286 /*@{ */
66287 - int open_count; /**< Outstanding files open */
66288 + local_t open_count; /**< Outstanding files open */
66289 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66290 atomic_t vma_count; /**< Outstanding vma areas open */
66291 int buf_use; /**< Buffers in use -- cannot alloc */
66292 @@ -928,7 +929,7 @@ struct drm_device {
66293 /*@{ */
66294 unsigned long counters;
66295 enum drm_stat_type types[15];
66296 - atomic_t counts[15];
66297 + atomic_unchecked_t counts[15];
66298 /*@} */
66299
66300 struct list_head filelist;
66301 @@ -1016,7 +1017,7 @@ struct drm_device {
66302 struct pci_controller *hose;
66303 #endif
66304 struct drm_sg_mem *sg; /**< Scatter gather memory */
66305 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
66306 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
66307 void *dev_private; /**< device private data */
66308 void *mm_private;
66309 struct address_space *dev_mapping;
66310 @@ -1042,11 +1043,11 @@ struct drm_device {
66311 spinlock_t object_name_lock;
66312 struct idr object_name_idr;
66313 atomic_t object_count;
66314 - atomic_t object_memory;
66315 + atomic_unchecked_t object_memory;
66316 atomic_t pin_count;
66317 - atomic_t pin_memory;
66318 + atomic_unchecked_t pin_memory;
66319 atomic_t gtt_count;
66320 - atomic_t gtt_memory;
66321 + atomic_unchecked_t gtt_memory;
66322 uint32_t gtt_total;
66323 uint32_t invalidate_domains; /* domains pending invalidation */
66324 uint32_t flush_domains; /* domains pending flush */
66325 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66326 index b29e201..3413cc9 100644
66327 --- a/include/drm/drm_crtc_helper.h
66328 +++ b/include/drm/drm_crtc_helper.h
66329 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66330
66331 /* reload the current crtc LUT */
66332 void (*load_lut)(struct drm_crtc *crtc);
66333 -};
66334 +} __no_const;
66335
66336 struct drm_encoder_helper_funcs {
66337 void (*dpms)(struct drm_encoder *encoder, int mode);
66338 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66339 struct drm_connector *connector);
66340 /* disable encoder when not in use - more explicit than dpms off */
66341 void (*disable)(struct drm_encoder *encoder);
66342 -};
66343 +} __no_const;
66344
66345 struct drm_connector_helper_funcs {
66346 int (*get_modes)(struct drm_connector *connector);
66347 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66348 index b199170..6f9e64c 100644
66349 --- a/include/drm/ttm/ttm_memory.h
66350 +++ b/include/drm/ttm/ttm_memory.h
66351 @@ -47,7 +47,7 @@
66352
66353 struct ttm_mem_shrink {
66354 int (*do_shrink) (struct ttm_mem_shrink *);
66355 -};
66356 +} __no_const;
66357
66358 /**
66359 * struct ttm_mem_global - Global memory accounting structure.
66360 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66361 index e86dfca..40cc55f 100644
66362 --- a/include/linux/a.out.h
66363 +++ b/include/linux/a.out.h
66364 @@ -39,6 +39,14 @@ enum machine_type {
66365 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66366 };
66367
66368 +/* Constants for the N_FLAGS field */
66369 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66370 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66371 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66372 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66373 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66374 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66375 +
66376 #if !defined (N_MAGIC)
66377 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66378 #endif
66379 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66380 index 817b237..62c10bc 100644
66381 --- a/include/linux/atmdev.h
66382 +++ b/include/linux/atmdev.h
66383 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66384 #endif
66385
66386 struct k_atm_aal_stats {
66387 -#define __HANDLE_ITEM(i) atomic_t i
66388 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66389 __AAL_STAT_ITEMS
66390 #undef __HANDLE_ITEM
66391 };
66392 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66393 index 0f5f578..8c4f884 100644
66394 --- a/include/linux/backlight.h
66395 +++ b/include/linux/backlight.h
66396 @@ -36,18 +36,18 @@ struct backlight_device;
66397 struct fb_info;
66398
66399 struct backlight_ops {
66400 - unsigned int options;
66401 + const unsigned int options;
66402
66403 #define BL_CORE_SUSPENDRESUME (1 << 0)
66404
66405 /* Notify the backlight driver some property has changed */
66406 - int (*update_status)(struct backlight_device *);
66407 + int (* const update_status)(struct backlight_device *);
66408 /* Return the current backlight brightness (accounting for power,
66409 fb_blank etc.) */
66410 - int (*get_brightness)(struct backlight_device *);
66411 + int (* const get_brightness)(struct backlight_device *);
66412 /* Check if given framebuffer device is the one bound to this backlight;
66413 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66414 - int (*check_fb)(struct fb_info *);
66415 + int (* const check_fb)(struct fb_info *);
66416 };
66417
66418 /* This structure defines all the properties of a backlight */
66419 @@ -86,7 +86,7 @@ struct backlight_device {
66420 registered this device has been unloaded, and if class_get_devdata()
66421 points to something in the body of that driver, it is also invalid. */
66422 struct mutex ops_lock;
66423 - struct backlight_ops *ops;
66424 + const struct backlight_ops *ops;
66425
66426 /* The framebuffer notifier block */
66427 struct notifier_block fb_notif;
66428 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66429 }
66430
66431 extern struct backlight_device *backlight_device_register(const char *name,
66432 - struct device *dev, void *devdata, struct backlight_ops *ops);
66433 + struct device *dev, void *devdata, const struct backlight_ops *ops);
66434 extern void backlight_device_unregister(struct backlight_device *bd);
66435 extern void backlight_force_update(struct backlight_device *bd,
66436 enum backlight_update_reason reason);
66437 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66438 index a3d802e..93a2ef4 100644
66439 --- a/include/linux/binfmts.h
66440 +++ b/include/linux/binfmts.h
66441 @@ -18,7 +18,7 @@ struct pt_regs;
66442 #define BINPRM_BUF_SIZE 128
66443
66444 #ifdef __KERNEL__
66445 -#include <linux/list.h>
66446 +#include <linux/sched.h>
66447
66448 #define CORENAME_MAX_SIZE 128
66449
66450 @@ -58,6 +58,7 @@ struct linux_binprm{
66451 unsigned interp_flags;
66452 unsigned interp_data;
66453 unsigned long loader, exec;
66454 + char tcomm[TASK_COMM_LEN];
66455 };
66456
66457 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66458 @@ -83,6 +84,7 @@ struct linux_binfmt {
66459 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66460 int (*load_shlib)(struct file *);
66461 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66462 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66463 unsigned long min_coredump; /* minimal dump size */
66464 int hasvdso;
66465 };
66466 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66467 index 5eb6cb0..a2906d2 100644
66468 --- a/include/linux/blkdev.h
66469 +++ b/include/linux/blkdev.h
66470 @@ -1281,7 +1281,7 @@ struct block_device_operations {
66471 int (*revalidate_disk) (struct gendisk *);
66472 int (*getgeo)(struct block_device *, struct hd_geometry *);
66473 struct module *owner;
66474 -};
66475 +} __do_const;
66476
66477 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66478 unsigned long);
66479 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66480 index 3b73b99..629d21b 100644
66481 --- a/include/linux/blktrace_api.h
66482 +++ b/include/linux/blktrace_api.h
66483 @@ -160,7 +160,7 @@ struct blk_trace {
66484 struct dentry *dir;
66485 struct dentry *dropped_file;
66486 struct dentry *msg_file;
66487 - atomic_t dropped;
66488 + atomic_unchecked_t dropped;
66489 };
66490
66491 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66492 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66493 index 83195fb..0b0f77d 100644
66494 --- a/include/linux/byteorder/little_endian.h
66495 +++ b/include/linux/byteorder/little_endian.h
66496 @@ -42,51 +42,51 @@
66497
66498 static inline __le64 __cpu_to_le64p(const __u64 *p)
66499 {
66500 - return (__force __le64)*p;
66501 + return (__force const __le64)*p;
66502 }
66503 static inline __u64 __le64_to_cpup(const __le64 *p)
66504 {
66505 - return (__force __u64)*p;
66506 + return (__force const __u64)*p;
66507 }
66508 static inline __le32 __cpu_to_le32p(const __u32 *p)
66509 {
66510 - return (__force __le32)*p;
66511 + return (__force const __le32)*p;
66512 }
66513 static inline __u32 __le32_to_cpup(const __le32 *p)
66514 {
66515 - return (__force __u32)*p;
66516 + return (__force const __u32)*p;
66517 }
66518 static inline __le16 __cpu_to_le16p(const __u16 *p)
66519 {
66520 - return (__force __le16)*p;
66521 + return (__force const __le16)*p;
66522 }
66523 static inline __u16 __le16_to_cpup(const __le16 *p)
66524 {
66525 - return (__force __u16)*p;
66526 + return (__force const __u16)*p;
66527 }
66528 static inline __be64 __cpu_to_be64p(const __u64 *p)
66529 {
66530 - return (__force __be64)__swab64p(p);
66531 + return (__force const __be64)__swab64p(p);
66532 }
66533 static inline __u64 __be64_to_cpup(const __be64 *p)
66534 {
66535 - return __swab64p((__u64 *)p);
66536 + return __swab64p((const __u64 *)p);
66537 }
66538 static inline __be32 __cpu_to_be32p(const __u32 *p)
66539 {
66540 - return (__force __be32)__swab32p(p);
66541 + return (__force const __be32)__swab32p(p);
66542 }
66543 static inline __u32 __be32_to_cpup(const __be32 *p)
66544 {
66545 - return __swab32p((__u32 *)p);
66546 + return __swab32p((const __u32 *)p);
66547 }
66548 static inline __be16 __cpu_to_be16p(const __u16 *p)
66549 {
66550 - return (__force __be16)__swab16p(p);
66551 + return (__force const __be16)__swab16p(p);
66552 }
66553 static inline __u16 __be16_to_cpup(const __be16 *p)
66554 {
66555 - return __swab16p((__u16 *)p);
66556 + return __swab16p((const __u16 *)p);
66557 }
66558 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66559 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66560 diff --git a/include/linux/cache.h b/include/linux/cache.h
66561 index 97e2488..e7576b9 100644
66562 --- a/include/linux/cache.h
66563 +++ b/include/linux/cache.h
66564 @@ -16,6 +16,10 @@
66565 #define __read_mostly
66566 #endif
66567
66568 +#ifndef __read_only
66569 +#define __read_only __read_mostly
66570 +#endif
66571 +
66572 #ifndef ____cacheline_aligned
66573 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66574 #endif
66575 diff --git a/include/linux/capability.h b/include/linux/capability.h
66576 index c8f2a5f7..1618a5c 100644
66577 --- a/include/linux/capability.h
66578 +++ b/include/linux/capability.h
66579 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66580 (security_real_capable_noaudit((t), (cap)) == 0)
66581
66582 extern int capable(int cap);
66583 +int capable_nolog(int cap);
66584
66585 /* audit system wants to get cap info from files as well */
66586 struct dentry;
66587 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66588 index 450fa59..86019fb 100644
66589 --- a/include/linux/compiler-gcc4.h
66590 +++ b/include/linux/compiler-gcc4.h
66591 @@ -36,4 +36,16 @@
66592 the kernel context */
66593 #define __cold __attribute__((__cold__))
66594
66595 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66596 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66597 +#define __bos0(ptr) __bos((ptr), 0)
66598 +#define __bos1(ptr) __bos((ptr), 1)
66599 +
66600 +#if __GNUC_MINOR__ >= 5
66601 +#ifdef CONSTIFY_PLUGIN
66602 +#define __no_const __attribute__((no_const))
66603 +#define __do_const __attribute__((do_const))
66604 +#endif
66605 +#endif
66606 +
66607 #endif
66608 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66609 index 04fb513..fd6477b 100644
66610 --- a/include/linux/compiler.h
66611 +++ b/include/linux/compiler.h
66612 @@ -5,11 +5,14 @@
66613
66614 #ifdef __CHECKER__
66615 # define __user __attribute__((noderef, address_space(1)))
66616 +# define __force_user __force __user
66617 # define __kernel /* default address space */
66618 +# define __force_kernel __force __kernel
66619 # define __safe __attribute__((safe))
66620 # define __force __attribute__((force))
66621 # define __nocast __attribute__((nocast))
66622 # define __iomem __attribute__((noderef, address_space(2)))
66623 +# define __force_iomem __force __iomem
66624 # define __acquires(x) __attribute__((context(x,0,1)))
66625 # define __releases(x) __attribute__((context(x,1,0)))
66626 # define __acquire(x) __context__(x,1)
66627 @@ -17,13 +20,34 @@
66628 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66629 extern void __chk_user_ptr(const volatile void __user *);
66630 extern void __chk_io_ptr(const volatile void __iomem *);
66631 +#elif defined(CHECKER_PLUGIN)
66632 +//# define __user
66633 +//# define __force_user
66634 +//# define __kernel
66635 +//# define __force_kernel
66636 +# define __safe
66637 +# define __force
66638 +# define __nocast
66639 +# define __iomem
66640 +# define __force_iomem
66641 +# define __chk_user_ptr(x) (void)0
66642 +# define __chk_io_ptr(x) (void)0
66643 +# define __builtin_warning(x, y...) (1)
66644 +# define __acquires(x)
66645 +# define __releases(x)
66646 +# define __acquire(x) (void)0
66647 +# define __release(x) (void)0
66648 +# define __cond_lock(x,c) (c)
66649 #else
66650 # define __user
66651 +# define __force_user
66652 # define __kernel
66653 +# define __force_kernel
66654 # define __safe
66655 # define __force
66656 # define __nocast
66657 # define __iomem
66658 +# define __force_iomem
66659 # define __chk_user_ptr(x) (void)0
66660 # define __chk_io_ptr(x) (void)0
66661 # define __builtin_warning(x, y...) (1)
66662 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66663 # define __attribute_const__ /* unimplemented */
66664 #endif
66665
66666 +#ifndef __no_const
66667 +# define __no_const
66668 +#endif
66669 +
66670 +#ifndef __do_const
66671 +# define __do_const
66672 +#endif
66673 +
66674 /*
66675 * Tell gcc if a function is cold. The compiler will assume any path
66676 * directly leading to the call is unlikely.
66677 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66678 #define __cold
66679 #endif
66680
66681 +#ifndef __alloc_size
66682 +#define __alloc_size(...)
66683 +#endif
66684 +
66685 +#ifndef __bos
66686 +#define __bos(ptr, arg)
66687 +#endif
66688 +
66689 +#ifndef __bos0
66690 +#define __bos0(ptr)
66691 +#endif
66692 +
66693 +#ifndef __bos1
66694 +#define __bos1(ptr)
66695 +#endif
66696 +
66697 /* Simple shorthand for a section definition */
66698 #ifndef __section
66699 # define __section(S) __attribute__ ((__section__(#S)))
66700 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66701 * use is to mediate communication between process-level code and irq/NMI
66702 * handlers, all running on the same CPU.
66703 */
66704 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66705 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66706 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66707
66708 #endif /* __LINUX_COMPILER_H */
66709 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66710 index fd92988..a3164bd 100644
66711 --- a/include/linux/crypto.h
66712 +++ b/include/linux/crypto.h
66713 @@ -394,7 +394,7 @@ struct cipher_tfm {
66714 const u8 *key, unsigned int keylen);
66715 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66716 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66717 -};
66718 +} __no_const;
66719
66720 struct hash_tfm {
66721 int (*init)(struct hash_desc *desc);
66722 @@ -415,13 +415,13 @@ struct compress_tfm {
66723 int (*cot_decompress)(struct crypto_tfm *tfm,
66724 const u8 *src, unsigned int slen,
66725 u8 *dst, unsigned int *dlen);
66726 -};
66727 +} __no_const;
66728
66729 struct rng_tfm {
66730 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66731 unsigned int dlen);
66732 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66733 -};
66734 +} __no_const;
66735
66736 #define crt_ablkcipher crt_u.ablkcipher
66737 #define crt_aead crt_u.aead
66738 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66739 index 30b93b2..cd7a8db 100644
66740 --- a/include/linux/dcache.h
66741 +++ b/include/linux/dcache.h
66742 @@ -119,6 +119,8 @@ struct dentry {
66743 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66744 };
66745
66746 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66747 +
66748 /*
66749 * dentry->d_lock spinlock nesting subclasses:
66750 *
66751 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66752 index 3e9bd6a..f4e1aa0 100644
66753 --- a/include/linux/decompress/mm.h
66754 +++ b/include/linux/decompress/mm.h
66755 @@ -78,7 +78,7 @@ static void free(void *where)
66756 * warnings when not needed (indeed large_malloc / large_free are not
66757 * needed by inflate */
66758
66759 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66760 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66761 #define free(a) kfree(a)
66762
66763 #define large_malloc(a) vmalloc(a)
66764 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66765 index 91b7618..92a93d32 100644
66766 --- a/include/linux/dma-mapping.h
66767 +++ b/include/linux/dma-mapping.h
66768 @@ -16,51 +16,51 @@ enum dma_data_direction {
66769 };
66770
66771 struct dma_map_ops {
66772 - void* (*alloc_coherent)(struct device *dev, size_t size,
66773 + void* (* const alloc_coherent)(struct device *dev, size_t size,
66774 dma_addr_t *dma_handle, gfp_t gfp);
66775 - void (*free_coherent)(struct device *dev, size_t size,
66776 + void (* const free_coherent)(struct device *dev, size_t size,
66777 void *vaddr, dma_addr_t dma_handle);
66778 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
66779 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66780 unsigned long offset, size_t size,
66781 enum dma_data_direction dir,
66782 struct dma_attrs *attrs);
66783 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66784 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66785 size_t size, enum dma_data_direction dir,
66786 struct dma_attrs *attrs);
66787 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
66788 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66789 int nents, enum dma_data_direction dir,
66790 struct dma_attrs *attrs);
66791 - void (*unmap_sg)(struct device *dev,
66792 + void (* const unmap_sg)(struct device *dev,
66793 struct scatterlist *sg, int nents,
66794 enum dma_data_direction dir,
66795 struct dma_attrs *attrs);
66796 - void (*sync_single_for_cpu)(struct device *dev,
66797 + void (* const sync_single_for_cpu)(struct device *dev,
66798 dma_addr_t dma_handle, size_t size,
66799 enum dma_data_direction dir);
66800 - void (*sync_single_for_device)(struct device *dev,
66801 + void (* const sync_single_for_device)(struct device *dev,
66802 dma_addr_t dma_handle, size_t size,
66803 enum dma_data_direction dir);
66804 - void (*sync_single_range_for_cpu)(struct device *dev,
66805 + void (* const sync_single_range_for_cpu)(struct device *dev,
66806 dma_addr_t dma_handle,
66807 unsigned long offset,
66808 size_t size,
66809 enum dma_data_direction dir);
66810 - void (*sync_single_range_for_device)(struct device *dev,
66811 + void (* const sync_single_range_for_device)(struct device *dev,
66812 dma_addr_t dma_handle,
66813 unsigned long offset,
66814 size_t size,
66815 enum dma_data_direction dir);
66816 - void (*sync_sg_for_cpu)(struct device *dev,
66817 + void (* const sync_sg_for_cpu)(struct device *dev,
66818 struct scatterlist *sg, int nents,
66819 enum dma_data_direction dir);
66820 - void (*sync_sg_for_device)(struct device *dev,
66821 + void (* const sync_sg_for_device)(struct device *dev,
66822 struct scatterlist *sg, int nents,
66823 enum dma_data_direction dir);
66824 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66825 - int (*dma_supported)(struct device *dev, u64 mask);
66826 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66827 + int (* const dma_supported)(struct device *dev, u64 mask);
66828 int (*set_dma_mask)(struct device *dev, u64 mask);
66829 int is_phys;
66830 -};
66831 +} __do_const;
66832
66833 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66834
66835 diff --git a/include/linux/dst.h b/include/linux/dst.h
66836 index e26fed8..b976d9f 100644
66837 --- a/include/linux/dst.h
66838 +++ b/include/linux/dst.h
66839 @@ -380,7 +380,7 @@ struct dst_node
66840 struct thread_pool *pool;
66841
66842 /* Transaction IDs live here */
66843 - atomic_long_t gen;
66844 + atomic_long_unchecked_t gen;
66845
66846 /*
66847 * How frequently and how many times transaction
66848 diff --git a/include/linux/elf.h b/include/linux/elf.h
66849 index 90a4ed0..d652617 100644
66850 --- a/include/linux/elf.h
66851 +++ b/include/linux/elf.h
66852 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66853 #define PT_GNU_EH_FRAME 0x6474e550
66854
66855 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66856 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66857 +
66858 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66859 +
66860 +/* Constants for the e_flags field */
66861 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66862 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66863 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66864 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66865 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66866 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66867
66868 /* These constants define the different elf file types */
66869 #define ET_NONE 0
66870 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66871 #define DT_DEBUG 21
66872 #define DT_TEXTREL 22
66873 #define DT_JMPREL 23
66874 +#define DT_FLAGS 30
66875 + #define DF_TEXTREL 0x00000004
66876 #define DT_ENCODING 32
66877 #define OLD_DT_LOOS 0x60000000
66878 #define DT_LOOS 0x6000000d
66879 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66880 #define PF_W 0x2
66881 #define PF_X 0x1
66882
66883 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66884 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66885 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66886 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66887 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66888 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66889 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66890 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66891 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66892 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66893 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66894 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66895 +
66896 typedef struct elf32_phdr{
66897 Elf32_Word p_type;
66898 Elf32_Off p_offset;
66899 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66900 #define EI_OSABI 7
66901 #define EI_PAD 8
66902
66903 +#define EI_PAX 14
66904 +
66905 #define ELFMAG0 0x7f /* EI_MAG */
66906 #define ELFMAG1 'E'
66907 #define ELFMAG2 'L'
66908 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66909 #define elf_phdr elf32_phdr
66910 #define elf_note elf32_note
66911 #define elf_addr_t Elf32_Off
66912 +#define elf_dyn Elf32_Dyn
66913
66914 #else
66915
66916 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66917 #define elf_phdr elf64_phdr
66918 #define elf_note elf64_note
66919 #define elf_addr_t Elf64_Off
66920 +#define elf_dyn Elf64_Dyn
66921
66922 #endif
66923
66924 diff --git a/include/linux/fs.h b/include/linux/fs.h
66925 index 1b9a47a..6fe2934 100644
66926 --- a/include/linux/fs.h
66927 +++ b/include/linux/fs.h
66928 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66929 unsigned long, unsigned long);
66930
66931 struct address_space_operations {
66932 - int (*writepage)(struct page *page, struct writeback_control *wbc);
66933 - int (*readpage)(struct file *, struct page *);
66934 - void (*sync_page)(struct page *);
66935 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
66936 + int (* const readpage)(struct file *, struct page *);
66937 + void (* const sync_page)(struct page *);
66938
66939 /* Write back some dirty pages from this mapping. */
66940 - int (*writepages)(struct address_space *, struct writeback_control *);
66941 + int (* const writepages)(struct address_space *, struct writeback_control *);
66942
66943 /* Set a page dirty. Return true if this dirtied it */
66944 - int (*set_page_dirty)(struct page *page);
66945 + int (* const set_page_dirty)(struct page *page);
66946
66947 - int (*readpages)(struct file *filp, struct address_space *mapping,
66948 + int (* const readpages)(struct file *filp, struct address_space *mapping,
66949 struct list_head *pages, unsigned nr_pages);
66950
66951 - int (*write_begin)(struct file *, struct address_space *mapping,
66952 + int (* const write_begin)(struct file *, struct address_space *mapping,
66953 loff_t pos, unsigned len, unsigned flags,
66954 struct page **pagep, void **fsdata);
66955 - int (*write_end)(struct file *, struct address_space *mapping,
66956 + int (* const write_end)(struct file *, struct address_space *mapping,
66957 loff_t pos, unsigned len, unsigned copied,
66958 struct page *page, void *fsdata);
66959
66960 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66961 - sector_t (*bmap)(struct address_space *, sector_t);
66962 - void (*invalidatepage) (struct page *, unsigned long);
66963 - int (*releasepage) (struct page *, gfp_t);
66964 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66965 + sector_t (* const bmap)(struct address_space *, sector_t);
66966 + void (* const invalidatepage) (struct page *, unsigned long);
66967 + int (* const releasepage) (struct page *, gfp_t);
66968 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66969 loff_t offset, unsigned long nr_segs);
66970 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66971 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66972 void **, unsigned long *);
66973 /* migrate the contents of a page to the specified target */
66974 - int (*migratepage) (struct address_space *,
66975 + int (* const migratepage) (struct address_space *,
66976 struct page *, struct page *);
66977 - int (*launder_page) (struct page *);
66978 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66979 + int (* const launder_page) (struct page *);
66980 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66981 unsigned long);
66982 - int (*error_remove_page)(struct address_space *, struct page *);
66983 + int (* const error_remove_page)(struct address_space *, struct page *);
66984 };
66985
66986 /*
66987 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66988 typedef struct files_struct *fl_owner_t;
66989
66990 struct file_lock_operations {
66991 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66992 - void (*fl_release_private)(struct file_lock *);
66993 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66994 + void (* const fl_release_private)(struct file_lock *);
66995 };
66996
66997 struct lock_manager_operations {
66998 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66999 - void (*fl_notify)(struct file_lock *); /* unblock callback */
67000 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
67001 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
67002 - void (*fl_release_private)(struct file_lock *);
67003 - void (*fl_break)(struct file_lock *);
67004 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
67005 - int (*fl_change)(struct file_lock **, int);
67006 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
67007 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
67008 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
67009 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
67010 + void (* const fl_release_private)(struct file_lock *);
67011 + void (* const fl_break)(struct file_lock *);
67012 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
67013 + int (* const fl_change)(struct file_lock **, int);
67014 };
67015
67016 struct lock_manager {
67017 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
67018 unsigned int fi_flags; /* Flags as passed from user */
67019 unsigned int fi_extents_mapped; /* Number of mapped extents */
67020 unsigned int fi_extents_max; /* Size of fiemap_extent array */
67021 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
67022 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
67023 * array */
67024 };
67025 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
67026 @@ -1512,7 +1512,8 @@ struct file_operations {
67027 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
67028 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
67029 int (*setlease)(struct file *, long, struct file_lock **);
67030 -};
67031 +} __do_const;
67032 +typedef struct file_operations __no_const file_operations_no_const;
67033
67034 struct inode_operations {
67035 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
67036 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
67037 unsigned long, loff_t *);
67038
67039 struct super_operations {
67040 - struct inode *(*alloc_inode)(struct super_block *sb);
67041 - void (*destroy_inode)(struct inode *);
67042 + struct inode *(* const alloc_inode)(struct super_block *sb);
67043 + void (* const destroy_inode)(struct inode *);
67044
67045 - void (*dirty_inode) (struct inode *);
67046 - int (*write_inode) (struct inode *, int);
67047 - void (*drop_inode) (struct inode *);
67048 - void (*delete_inode) (struct inode *);
67049 - void (*put_super) (struct super_block *);
67050 - void (*write_super) (struct super_block *);
67051 - int (*sync_fs)(struct super_block *sb, int wait);
67052 - int (*freeze_fs) (struct super_block *);
67053 - int (*unfreeze_fs) (struct super_block *);
67054 - int (*statfs) (struct dentry *, struct kstatfs *);
67055 - int (*remount_fs) (struct super_block *, int *, char *);
67056 - void (*clear_inode) (struct inode *);
67057 - void (*umount_begin) (struct super_block *);
67058 + void (* const dirty_inode) (struct inode *);
67059 + int (* const write_inode) (struct inode *, int);
67060 + void (* const drop_inode) (struct inode *);
67061 + void (* const delete_inode) (struct inode *);
67062 + void (* const put_super) (struct super_block *);
67063 + void (* const write_super) (struct super_block *);
67064 + int (* const sync_fs)(struct super_block *sb, int wait);
67065 + int (* const freeze_fs) (struct super_block *);
67066 + int (* const unfreeze_fs) (struct super_block *);
67067 + int (* const statfs) (struct dentry *, struct kstatfs *);
67068 + int (* const remount_fs) (struct super_block *, int *, char *);
67069 + void (* const clear_inode) (struct inode *);
67070 + void (* const umount_begin) (struct super_block *);
67071
67072 - int (*show_options)(struct seq_file *, struct vfsmount *);
67073 - int (*show_stats)(struct seq_file *, struct vfsmount *);
67074 + int (* const show_options)(struct seq_file *, struct vfsmount *);
67075 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
67076 #ifdef CONFIG_QUOTA
67077 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
67078 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67079 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
67080 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67081 #endif
67082 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67083 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67084 };
67085
67086 /*
67087 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
67088 index 78a05bf..2a7d3e1 100644
67089 --- a/include/linux/fs_struct.h
67090 +++ b/include/linux/fs_struct.h
67091 @@ -4,7 +4,7 @@
67092 #include <linux/path.h>
67093
67094 struct fs_struct {
67095 - int users;
67096 + atomic_t users;
67097 rwlock_t lock;
67098 int umask;
67099 int in_exec;
67100 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
67101 index 7be0c6f..2f63a2b 100644
67102 --- a/include/linux/fscache-cache.h
67103 +++ b/include/linux/fscache-cache.h
67104 @@ -116,7 +116,7 @@ struct fscache_operation {
67105 #endif
67106 };
67107
67108 -extern atomic_t fscache_op_debug_id;
67109 +extern atomic_unchecked_t fscache_op_debug_id;
67110 extern const struct slow_work_ops fscache_op_slow_work_ops;
67111
67112 extern void fscache_enqueue_operation(struct fscache_operation *);
67113 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
67114 fscache_operation_release_t release)
67115 {
67116 atomic_set(&op->usage, 1);
67117 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
67118 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
67119 op->release = release;
67120 INIT_LIST_HEAD(&op->pend_link);
67121 fscache_set_op_state(op, "Init");
67122 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
67123 index 4d6f47b..00bcedb 100644
67124 --- a/include/linux/fsnotify_backend.h
67125 +++ b/include/linux/fsnotify_backend.h
67126 @@ -86,6 +86,7 @@ struct fsnotify_ops {
67127 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
67128 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
67129 };
67130 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
67131
67132 /*
67133 * A group is a "thing" that wants to receive notification about filesystem
67134 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
67135 index 4ec5e67..42f1eb9 100644
67136 --- a/include/linux/ftrace_event.h
67137 +++ b/include/linux/ftrace_event.h
67138 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
67139 int filter_type);
67140 extern int trace_define_common_fields(struct ftrace_event_call *call);
67141
67142 -#define is_signed_type(type) (((type)(-1)) < 0)
67143 +#define is_signed_type(type) (((type)(-1)) < (type)1)
67144
67145 int trace_set_clr_event(const char *system, const char *event, int set);
67146
67147 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67148 index 297df45..b6a74ff 100644
67149 --- a/include/linux/genhd.h
67150 +++ b/include/linux/genhd.h
67151 @@ -161,7 +161,7 @@ struct gendisk {
67152
67153 struct timer_rand_state *random;
67154
67155 - atomic_t sync_io; /* RAID */
67156 + atomic_unchecked_t sync_io; /* RAID */
67157 struct work_struct async_notify;
67158 #ifdef CONFIG_BLK_DEV_INTEGRITY
67159 struct blk_integrity *integrity;
67160 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67161 new file mode 100644
67162 index 0000000..af663cf
67163 --- /dev/null
67164 +++ b/include/linux/gracl.h
67165 @@ -0,0 +1,319 @@
67166 +#ifndef GR_ACL_H
67167 +#define GR_ACL_H
67168 +
67169 +#include <linux/grdefs.h>
67170 +#include <linux/resource.h>
67171 +#include <linux/capability.h>
67172 +#include <linux/dcache.h>
67173 +#include <asm/resource.h>
67174 +
67175 +/* Major status information */
67176 +
67177 +#define GR_VERSION "grsecurity 2.9"
67178 +#define GRSECURITY_VERSION 0x2900
67179 +
67180 +enum {
67181 + GR_SHUTDOWN = 0,
67182 + GR_ENABLE = 1,
67183 + GR_SPROLE = 2,
67184 + GR_RELOAD = 3,
67185 + GR_SEGVMOD = 4,
67186 + GR_STATUS = 5,
67187 + GR_UNSPROLE = 6,
67188 + GR_PASSSET = 7,
67189 + GR_SPROLEPAM = 8,
67190 +};
67191 +
67192 +/* Password setup definitions
67193 + * kernel/grhash.c */
67194 +enum {
67195 + GR_PW_LEN = 128,
67196 + GR_SALT_LEN = 16,
67197 + GR_SHA_LEN = 32,
67198 +};
67199 +
67200 +enum {
67201 + GR_SPROLE_LEN = 64,
67202 +};
67203 +
67204 +enum {
67205 + GR_NO_GLOB = 0,
67206 + GR_REG_GLOB,
67207 + GR_CREATE_GLOB
67208 +};
67209 +
67210 +#define GR_NLIMITS 32
67211 +
67212 +/* Begin Data Structures */
67213 +
67214 +struct sprole_pw {
67215 + unsigned char *rolename;
67216 + unsigned char salt[GR_SALT_LEN];
67217 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67218 +};
67219 +
67220 +struct name_entry {
67221 + __u32 key;
67222 + ino_t inode;
67223 + dev_t device;
67224 + char *name;
67225 + __u16 len;
67226 + __u8 deleted;
67227 + struct name_entry *prev;
67228 + struct name_entry *next;
67229 +};
67230 +
67231 +struct inodev_entry {
67232 + struct name_entry *nentry;
67233 + struct inodev_entry *prev;
67234 + struct inodev_entry *next;
67235 +};
67236 +
67237 +struct acl_role_db {
67238 + struct acl_role_label **r_hash;
67239 + __u32 r_size;
67240 +};
67241 +
67242 +struct inodev_db {
67243 + struct inodev_entry **i_hash;
67244 + __u32 i_size;
67245 +};
67246 +
67247 +struct name_db {
67248 + struct name_entry **n_hash;
67249 + __u32 n_size;
67250 +};
67251 +
67252 +struct crash_uid {
67253 + uid_t uid;
67254 + unsigned long expires;
67255 +};
67256 +
67257 +struct gr_hash_struct {
67258 + void **table;
67259 + void **nametable;
67260 + void *first;
67261 + __u32 table_size;
67262 + __u32 used_size;
67263 + int type;
67264 +};
67265 +
67266 +/* Userspace Grsecurity ACL data structures */
67267 +
67268 +struct acl_subject_label {
67269 + char *filename;
67270 + ino_t inode;
67271 + dev_t device;
67272 + __u32 mode;
67273 + kernel_cap_t cap_mask;
67274 + kernel_cap_t cap_lower;
67275 + kernel_cap_t cap_invert_audit;
67276 +
67277 + struct rlimit res[GR_NLIMITS];
67278 + __u32 resmask;
67279 +
67280 + __u8 user_trans_type;
67281 + __u8 group_trans_type;
67282 + uid_t *user_transitions;
67283 + gid_t *group_transitions;
67284 + __u16 user_trans_num;
67285 + __u16 group_trans_num;
67286 +
67287 + __u32 sock_families[2];
67288 + __u32 ip_proto[8];
67289 + __u32 ip_type;
67290 + struct acl_ip_label **ips;
67291 + __u32 ip_num;
67292 + __u32 inaddr_any_override;
67293 +
67294 + __u32 crashes;
67295 + unsigned long expires;
67296 +
67297 + struct acl_subject_label *parent_subject;
67298 + struct gr_hash_struct *hash;
67299 + struct acl_subject_label *prev;
67300 + struct acl_subject_label *next;
67301 +
67302 + struct acl_object_label **obj_hash;
67303 + __u32 obj_hash_size;
67304 + __u16 pax_flags;
67305 +};
67306 +
67307 +struct role_allowed_ip {
67308 + __u32 addr;
67309 + __u32 netmask;
67310 +
67311 + struct role_allowed_ip *prev;
67312 + struct role_allowed_ip *next;
67313 +};
67314 +
67315 +struct role_transition {
67316 + char *rolename;
67317 +
67318 + struct role_transition *prev;
67319 + struct role_transition *next;
67320 +};
67321 +
67322 +struct acl_role_label {
67323 + char *rolename;
67324 + uid_t uidgid;
67325 + __u16 roletype;
67326 +
67327 + __u16 auth_attempts;
67328 + unsigned long expires;
67329 +
67330 + struct acl_subject_label *root_label;
67331 + struct gr_hash_struct *hash;
67332 +
67333 + struct acl_role_label *prev;
67334 + struct acl_role_label *next;
67335 +
67336 + struct role_transition *transitions;
67337 + struct role_allowed_ip *allowed_ips;
67338 + uid_t *domain_children;
67339 + __u16 domain_child_num;
67340 +
67341 + mode_t umask;
67342 +
67343 + struct acl_subject_label **subj_hash;
67344 + __u32 subj_hash_size;
67345 +};
67346 +
67347 +struct user_acl_role_db {
67348 + struct acl_role_label **r_table;
67349 + __u32 num_pointers; /* Number of allocations to track */
67350 + __u32 num_roles; /* Number of roles */
67351 + __u32 num_domain_children; /* Number of domain children */
67352 + __u32 num_subjects; /* Number of subjects */
67353 + __u32 num_objects; /* Number of objects */
67354 +};
67355 +
67356 +struct acl_object_label {
67357 + char *filename;
67358 + ino_t inode;
67359 + dev_t device;
67360 + __u32 mode;
67361 +
67362 + struct acl_subject_label *nested;
67363 + struct acl_object_label *globbed;
67364 +
67365 + /* next two structures not used */
67366 +
67367 + struct acl_object_label *prev;
67368 + struct acl_object_label *next;
67369 +};
67370 +
67371 +struct acl_ip_label {
67372 + char *iface;
67373 + __u32 addr;
67374 + __u32 netmask;
67375 + __u16 low, high;
67376 + __u8 mode;
67377 + __u32 type;
67378 + __u32 proto[8];
67379 +
67380 + /* next two structures not used */
67381 +
67382 + struct acl_ip_label *prev;
67383 + struct acl_ip_label *next;
67384 +};
67385 +
67386 +struct gr_arg {
67387 + struct user_acl_role_db role_db;
67388 + unsigned char pw[GR_PW_LEN];
67389 + unsigned char salt[GR_SALT_LEN];
67390 + unsigned char sum[GR_SHA_LEN];
67391 + unsigned char sp_role[GR_SPROLE_LEN];
67392 + struct sprole_pw *sprole_pws;
67393 + dev_t segv_device;
67394 + ino_t segv_inode;
67395 + uid_t segv_uid;
67396 + __u16 num_sprole_pws;
67397 + __u16 mode;
67398 +};
67399 +
67400 +struct gr_arg_wrapper {
67401 + struct gr_arg *arg;
67402 + __u32 version;
67403 + __u32 size;
67404 +};
67405 +
67406 +struct subject_map {
67407 + struct acl_subject_label *user;
67408 + struct acl_subject_label *kernel;
67409 + struct subject_map *prev;
67410 + struct subject_map *next;
67411 +};
67412 +
67413 +struct acl_subj_map_db {
67414 + struct subject_map **s_hash;
67415 + __u32 s_size;
67416 +};
67417 +
67418 +/* End Data Structures Section */
67419 +
67420 +/* Hash functions generated by empirical testing by Brad Spengler
67421 + Makes good use of the low bits of the inode. Generally 0-1 times
67422 + in loop for successful match. 0-3 for unsuccessful match.
67423 + Shift/add algorithm with modulus of table size and an XOR*/
67424 +
67425 +static __inline__ unsigned int
67426 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67427 +{
67428 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
67429 +}
67430 +
67431 + static __inline__ unsigned int
67432 +shash(const struct acl_subject_label *userp, const unsigned int sz)
67433 +{
67434 + return ((const unsigned long)userp % sz);
67435 +}
67436 +
67437 +static __inline__ unsigned int
67438 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67439 +{
67440 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67441 +}
67442 +
67443 +static __inline__ unsigned int
67444 +nhash(const char *name, const __u16 len, const unsigned int sz)
67445 +{
67446 + return full_name_hash((const unsigned char *)name, len) % sz;
67447 +}
67448 +
67449 +#define FOR_EACH_ROLE_START(role) \
67450 + role = role_list; \
67451 + while (role) {
67452 +
67453 +#define FOR_EACH_ROLE_END(role) \
67454 + role = role->prev; \
67455 + }
67456 +
67457 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67458 + subj = NULL; \
67459 + iter = 0; \
67460 + while (iter < role->subj_hash_size) { \
67461 + if (subj == NULL) \
67462 + subj = role->subj_hash[iter]; \
67463 + if (subj == NULL) { \
67464 + iter++; \
67465 + continue; \
67466 + }
67467 +
67468 +#define FOR_EACH_SUBJECT_END(subj,iter) \
67469 + subj = subj->next; \
67470 + if (subj == NULL) \
67471 + iter++; \
67472 + }
67473 +
67474 +
67475 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67476 + subj = role->hash->first; \
67477 + while (subj != NULL) {
67478 +
67479 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67480 + subj = subj->next; \
67481 + }
67482 +
67483 +#endif
67484 +
67485 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67486 new file mode 100644
67487 index 0000000..323ecf2
67488 --- /dev/null
67489 +++ b/include/linux/gralloc.h
67490 @@ -0,0 +1,9 @@
67491 +#ifndef __GRALLOC_H
67492 +#define __GRALLOC_H
67493 +
67494 +void acl_free_all(void);
67495 +int acl_alloc_stack_init(unsigned long size);
67496 +void *acl_alloc(unsigned long len);
67497 +void *acl_alloc_num(unsigned long num, unsigned long len);
67498 +
67499 +#endif
67500 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67501 new file mode 100644
67502 index 0000000..70d6cd5
67503 --- /dev/null
67504 +++ b/include/linux/grdefs.h
67505 @@ -0,0 +1,140 @@
67506 +#ifndef GRDEFS_H
67507 +#define GRDEFS_H
67508 +
67509 +/* Begin grsecurity status declarations */
67510 +
67511 +enum {
67512 + GR_READY = 0x01,
67513 + GR_STATUS_INIT = 0x00 // disabled state
67514 +};
67515 +
67516 +/* Begin ACL declarations */
67517 +
67518 +/* Role flags */
67519 +
67520 +enum {
67521 + GR_ROLE_USER = 0x0001,
67522 + GR_ROLE_GROUP = 0x0002,
67523 + GR_ROLE_DEFAULT = 0x0004,
67524 + GR_ROLE_SPECIAL = 0x0008,
67525 + GR_ROLE_AUTH = 0x0010,
67526 + GR_ROLE_NOPW = 0x0020,
67527 + GR_ROLE_GOD = 0x0040,
67528 + GR_ROLE_LEARN = 0x0080,
67529 + GR_ROLE_TPE = 0x0100,
67530 + GR_ROLE_DOMAIN = 0x0200,
67531 + GR_ROLE_PAM = 0x0400,
67532 + GR_ROLE_PERSIST = 0x800
67533 +};
67534 +
67535 +/* ACL Subject and Object mode flags */
67536 +enum {
67537 + GR_DELETED = 0x80000000
67538 +};
67539 +
67540 +/* ACL Object-only mode flags */
67541 +enum {
67542 + GR_READ = 0x00000001,
67543 + GR_APPEND = 0x00000002,
67544 + GR_WRITE = 0x00000004,
67545 + GR_EXEC = 0x00000008,
67546 + GR_FIND = 0x00000010,
67547 + GR_INHERIT = 0x00000020,
67548 + GR_SETID = 0x00000040,
67549 + GR_CREATE = 0x00000080,
67550 + GR_DELETE = 0x00000100,
67551 + GR_LINK = 0x00000200,
67552 + GR_AUDIT_READ = 0x00000400,
67553 + GR_AUDIT_APPEND = 0x00000800,
67554 + GR_AUDIT_WRITE = 0x00001000,
67555 + GR_AUDIT_EXEC = 0x00002000,
67556 + GR_AUDIT_FIND = 0x00004000,
67557 + GR_AUDIT_INHERIT= 0x00008000,
67558 + GR_AUDIT_SETID = 0x00010000,
67559 + GR_AUDIT_CREATE = 0x00020000,
67560 + GR_AUDIT_DELETE = 0x00040000,
67561 + GR_AUDIT_LINK = 0x00080000,
67562 + GR_PTRACERD = 0x00100000,
67563 + GR_NOPTRACE = 0x00200000,
67564 + GR_SUPPRESS = 0x00400000,
67565 + GR_NOLEARN = 0x00800000,
67566 + GR_INIT_TRANSFER= 0x01000000
67567 +};
67568 +
67569 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67570 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67571 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67572 +
67573 +/* ACL subject-only mode flags */
67574 +enum {
67575 + GR_KILL = 0x00000001,
67576 + GR_VIEW = 0x00000002,
67577 + GR_PROTECTED = 0x00000004,
67578 + GR_LEARN = 0x00000008,
67579 + GR_OVERRIDE = 0x00000010,
67580 + /* just a placeholder, this mode is only used in userspace */
67581 + GR_DUMMY = 0x00000020,
67582 + GR_PROTSHM = 0x00000040,
67583 + GR_KILLPROC = 0x00000080,
67584 + GR_KILLIPPROC = 0x00000100,
67585 + /* just a placeholder, this mode is only used in userspace */
67586 + GR_NOTROJAN = 0x00000200,
67587 + GR_PROTPROCFD = 0x00000400,
67588 + GR_PROCACCT = 0x00000800,
67589 + GR_RELAXPTRACE = 0x00001000,
67590 + GR_NESTED = 0x00002000,
67591 + GR_INHERITLEARN = 0x00004000,
67592 + GR_PROCFIND = 0x00008000,
67593 + GR_POVERRIDE = 0x00010000,
67594 + GR_KERNELAUTH = 0x00020000,
67595 + GR_ATSECURE = 0x00040000,
67596 + GR_SHMEXEC = 0x00080000
67597 +};
67598 +
67599 +enum {
67600 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67601 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67602 + GR_PAX_ENABLE_MPROTECT = 0x0004,
67603 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
67604 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67605 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67606 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67607 + GR_PAX_DISABLE_MPROTECT = 0x0400,
67608 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
67609 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67610 +};
67611 +
67612 +enum {
67613 + GR_ID_USER = 0x01,
67614 + GR_ID_GROUP = 0x02,
67615 +};
67616 +
67617 +enum {
67618 + GR_ID_ALLOW = 0x01,
67619 + GR_ID_DENY = 0x02,
67620 +};
67621 +
67622 +#define GR_CRASH_RES 31
67623 +#define GR_UIDTABLE_MAX 500
67624 +
67625 +/* begin resource learning section */
67626 +enum {
67627 + GR_RLIM_CPU_BUMP = 60,
67628 + GR_RLIM_FSIZE_BUMP = 50000,
67629 + GR_RLIM_DATA_BUMP = 10000,
67630 + GR_RLIM_STACK_BUMP = 1000,
67631 + GR_RLIM_CORE_BUMP = 10000,
67632 + GR_RLIM_RSS_BUMP = 500000,
67633 + GR_RLIM_NPROC_BUMP = 1,
67634 + GR_RLIM_NOFILE_BUMP = 5,
67635 + GR_RLIM_MEMLOCK_BUMP = 50000,
67636 + GR_RLIM_AS_BUMP = 500000,
67637 + GR_RLIM_LOCKS_BUMP = 2,
67638 + GR_RLIM_SIGPENDING_BUMP = 5,
67639 + GR_RLIM_MSGQUEUE_BUMP = 10000,
67640 + GR_RLIM_NICE_BUMP = 1,
67641 + GR_RLIM_RTPRIO_BUMP = 1,
67642 + GR_RLIM_RTTIME_BUMP = 1000000
67643 +};
67644 +
67645 +#endif
67646 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67647 new file mode 100644
67648 index 0000000..3826b91
67649 --- /dev/null
67650 +++ b/include/linux/grinternal.h
67651 @@ -0,0 +1,219 @@
67652 +#ifndef __GRINTERNAL_H
67653 +#define __GRINTERNAL_H
67654 +
67655 +#ifdef CONFIG_GRKERNSEC
67656 +
67657 +#include <linux/fs.h>
67658 +#include <linux/mnt_namespace.h>
67659 +#include <linux/nsproxy.h>
67660 +#include <linux/gracl.h>
67661 +#include <linux/grdefs.h>
67662 +#include <linux/grmsg.h>
67663 +
67664 +void gr_add_learn_entry(const char *fmt, ...)
67665 + __attribute__ ((format (printf, 1, 2)));
67666 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67667 + const struct vfsmount *mnt);
67668 +__u32 gr_check_create(const struct dentry *new_dentry,
67669 + const struct dentry *parent,
67670 + const struct vfsmount *mnt, const __u32 mode);
67671 +int gr_check_protected_task(const struct task_struct *task);
67672 +__u32 to_gr_audit(const __u32 reqmode);
67673 +int gr_set_acls(const int type);
67674 +int gr_apply_subject_to_task(struct task_struct *task);
67675 +int gr_acl_is_enabled(void);
67676 +char gr_roletype_to_char(void);
67677 +
67678 +void gr_handle_alertkill(struct task_struct *task);
67679 +char *gr_to_filename(const struct dentry *dentry,
67680 + const struct vfsmount *mnt);
67681 +char *gr_to_filename1(const struct dentry *dentry,
67682 + const struct vfsmount *mnt);
67683 +char *gr_to_filename2(const struct dentry *dentry,
67684 + const struct vfsmount *mnt);
67685 +char *gr_to_filename3(const struct dentry *dentry,
67686 + const struct vfsmount *mnt);
67687 +
67688 +extern int grsec_enable_ptrace_readexec;
67689 +extern int grsec_enable_harden_ptrace;
67690 +extern int grsec_enable_link;
67691 +extern int grsec_enable_fifo;
67692 +extern int grsec_enable_shm;
67693 +extern int grsec_enable_execlog;
67694 +extern int grsec_enable_signal;
67695 +extern int grsec_enable_audit_ptrace;
67696 +extern int grsec_enable_forkfail;
67697 +extern int grsec_enable_time;
67698 +extern int grsec_enable_rofs;
67699 +extern int grsec_enable_chroot_shmat;
67700 +extern int grsec_enable_chroot_mount;
67701 +extern int grsec_enable_chroot_double;
67702 +extern int grsec_enable_chroot_pivot;
67703 +extern int grsec_enable_chroot_chdir;
67704 +extern int grsec_enable_chroot_chmod;
67705 +extern int grsec_enable_chroot_mknod;
67706 +extern int grsec_enable_chroot_fchdir;
67707 +extern int grsec_enable_chroot_nice;
67708 +extern int grsec_enable_chroot_execlog;
67709 +extern int grsec_enable_chroot_caps;
67710 +extern int grsec_enable_chroot_sysctl;
67711 +extern int grsec_enable_chroot_unix;
67712 +extern int grsec_enable_tpe;
67713 +extern int grsec_tpe_gid;
67714 +extern int grsec_enable_tpe_all;
67715 +extern int grsec_enable_tpe_invert;
67716 +extern int grsec_enable_socket_all;
67717 +extern int grsec_socket_all_gid;
67718 +extern int grsec_enable_socket_client;
67719 +extern int grsec_socket_client_gid;
67720 +extern int grsec_enable_socket_server;
67721 +extern int grsec_socket_server_gid;
67722 +extern int grsec_audit_gid;
67723 +extern int grsec_enable_group;
67724 +extern int grsec_enable_audit_textrel;
67725 +extern int grsec_enable_log_rwxmaps;
67726 +extern int grsec_enable_mount;
67727 +extern int grsec_enable_chdir;
67728 +extern int grsec_resource_logging;
67729 +extern int grsec_enable_blackhole;
67730 +extern int grsec_lastack_retries;
67731 +extern int grsec_enable_brute;
67732 +extern int grsec_lock;
67733 +
67734 +extern spinlock_t grsec_alert_lock;
67735 +extern unsigned long grsec_alert_wtime;
67736 +extern unsigned long grsec_alert_fyet;
67737 +
67738 +extern spinlock_t grsec_audit_lock;
67739 +
67740 +extern rwlock_t grsec_exec_file_lock;
67741 +
67742 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67743 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67744 + (tsk)->exec_file->f_vfsmnt) : "/")
67745 +
67746 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67747 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67748 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67749 +
67750 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67751 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
67752 + (tsk)->exec_file->f_vfsmnt) : "/")
67753 +
67754 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67755 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67756 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67757 +
67758 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67759 +
67760 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67761 +
67762 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67763 + (task)->pid, (cred)->uid, \
67764 + (cred)->euid, (cred)->gid, (cred)->egid, \
67765 + gr_parent_task_fullpath(task), \
67766 + (task)->real_parent->comm, (task)->real_parent->pid, \
67767 + (pcred)->uid, (pcred)->euid, \
67768 + (pcred)->gid, (pcred)->egid
67769 +
67770 +#define GR_CHROOT_CAPS {{ \
67771 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67772 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67773 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67774 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67775 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67776 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67777 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
67778 +
67779 +#define security_learn(normal_msg,args...) \
67780 +({ \
67781 + read_lock(&grsec_exec_file_lock); \
67782 + gr_add_learn_entry(normal_msg "\n", ## args); \
67783 + read_unlock(&grsec_exec_file_lock); \
67784 +})
67785 +
67786 +enum {
67787 + GR_DO_AUDIT,
67788 + GR_DONT_AUDIT,
67789 + GR_DONT_AUDIT_GOOD
67790 +};
67791 +
67792 +enum {
67793 + GR_TTYSNIFF,
67794 + GR_RBAC,
67795 + GR_RBAC_STR,
67796 + GR_STR_RBAC,
67797 + GR_RBAC_MODE2,
67798 + GR_RBAC_MODE3,
67799 + GR_FILENAME,
67800 + GR_SYSCTL_HIDDEN,
67801 + GR_NOARGS,
67802 + GR_ONE_INT,
67803 + GR_ONE_INT_TWO_STR,
67804 + GR_ONE_STR,
67805 + GR_STR_INT,
67806 + GR_TWO_STR_INT,
67807 + GR_TWO_INT,
67808 + GR_TWO_U64,
67809 + GR_THREE_INT,
67810 + GR_FIVE_INT_TWO_STR,
67811 + GR_TWO_STR,
67812 + GR_THREE_STR,
67813 + GR_FOUR_STR,
67814 + GR_STR_FILENAME,
67815 + GR_FILENAME_STR,
67816 + GR_FILENAME_TWO_INT,
67817 + GR_FILENAME_TWO_INT_STR,
67818 + GR_TEXTREL,
67819 + GR_PTRACE,
67820 + GR_RESOURCE,
67821 + GR_CAP,
67822 + GR_SIG,
67823 + GR_SIG2,
67824 + GR_CRASH1,
67825 + GR_CRASH2,
67826 + GR_PSACCT,
67827 + GR_RWXMAP
67828 +};
67829 +
67830 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67831 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67832 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67833 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67834 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67835 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67836 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67837 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67838 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67839 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67840 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67841 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67842 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67843 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67844 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67845 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67846 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67847 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67848 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67849 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67850 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67851 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67852 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67853 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67854 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67855 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67856 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67857 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67858 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67859 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67860 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67861 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67862 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67863 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67864 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67865 +
67866 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67867 +
67868 +#endif
67869 +
67870 +#endif
67871 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67872 new file mode 100644
67873 index 0000000..f885406
67874 --- /dev/null
67875 +++ b/include/linux/grmsg.h
67876 @@ -0,0 +1,109 @@
67877 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67878 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67879 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67880 +#define GR_STOPMOD_MSG "denied modification of module state by "
67881 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67882 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67883 +#define GR_IOPERM_MSG "denied use of ioperm() by "
67884 +#define GR_IOPL_MSG "denied use of iopl() by "
67885 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67886 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67887 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67888 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67889 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67890 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67891 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67892 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67893 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67894 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67895 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67896 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67897 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67898 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67899 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67900 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67901 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67902 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67903 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67904 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67905 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67906 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67907 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67908 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67909 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67910 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67911 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67912 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67913 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67914 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67915 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67916 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67917 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67918 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67919 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67920 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67921 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67922 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67923 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67924 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67925 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67926 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67927 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67928 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67929 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67930 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67931 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67932 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67933 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67934 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67935 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67936 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67937 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67938 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67939 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67940 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67941 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67942 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67943 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67944 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67945 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67946 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67947 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67948 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
67949 +#define GR_NICE_CHROOT_MSG "denied priority change by "
67950 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67951 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67952 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67953 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67954 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67955 +#define GR_TIME_MSG "time set by "
67956 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67957 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67958 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67959 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67960 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67961 +#define GR_BIND_MSG "denied bind() by "
67962 +#define GR_CONNECT_MSG "denied connect() by "
67963 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67964 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67965 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67966 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67967 +#define GR_CAP_ACL_MSG "use of %s denied for "
67968 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67969 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67970 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67971 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67972 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67973 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67974 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67975 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67976 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67977 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67978 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67979 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67980 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67981 +#define GR_VM86_MSG "denied use of vm86 by "
67982 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67983 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67984 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67985 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67986 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67987 new file mode 100644
67988 index 0000000..c1793ae
67989 --- /dev/null
67990 +++ b/include/linux/grsecurity.h
67991 @@ -0,0 +1,219 @@
67992 +#ifndef GR_SECURITY_H
67993 +#define GR_SECURITY_H
67994 +#include <linux/fs.h>
67995 +#include <linux/fs_struct.h>
67996 +#include <linux/binfmts.h>
67997 +#include <linux/gracl.h>
67998 +#include <linux/compat.h>
67999 +
68000 +/* notify of brain-dead configs */
68001 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68002 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
68003 +#endif
68004 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
68005 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
68006 +#endif
68007 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
68008 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
68009 +#endif
68010 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
68011 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
68012 +#endif
68013 +
68014 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
68015 +void gr_handle_brute_check(void);
68016 +void gr_handle_kernel_exploit(void);
68017 +int gr_process_user_ban(void);
68018 +
68019 +char gr_roletype_to_char(void);
68020 +
68021 +int gr_acl_enable_at_secure(void);
68022 +
68023 +int gr_check_user_change(int real, int effective, int fs);
68024 +int gr_check_group_change(int real, int effective, int fs);
68025 +
68026 +void gr_del_task_from_ip_table(struct task_struct *p);
68027 +
68028 +int gr_pid_is_chrooted(struct task_struct *p);
68029 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
68030 +int gr_handle_chroot_nice(void);
68031 +int gr_handle_chroot_sysctl(const int op);
68032 +int gr_handle_chroot_setpriority(struct task_struct *p,
68033 + const int niceval);
68034 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
68035 +int gr_handle_chroot_chroot(const struct dentry *dentry,
68036 + const struct vfsmount *mnt);
68037 +void gr_handle_chroot_chdir(struct path *path);
68038 +int gr_handle_chroot_chmod(const struct dentry *dentry,
68039 + const struct vfsmount *mnt, const int mode);
68040 +int gr_handle_chroot_mknod(const struct dentry *dentry,
68041 + const struct vfsmount *mnt, const int mode);
68042 +int gr_handle_chroot_mount(const struct dentry *dentry,
68043 + const struct vfsmount *mnt,
68044 + const char *dev_name);
68045 +int gr_handle_chroot_pivot(void);
68046 +int gr_handle_chroot_unix(const pid_t pid);
68047 +
68048 +int gr_handle_rawio(const struct inode *inode);
68049 +
68050 +void gr_handle_ioperm(void);
68051 +void gr_handle_iopl(void);
68052 +
68053 +umode_t gr_acl_umask(void);
68054 +
68055 +int gr_tpe_allow(const struct file *file);
68056 +
68057 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
68058 +void gr_clear_chroot_entries(struct task_struct *task);
68059 +
68060 +void gr_log_forkfail(const int retval);
68061 +void gr_log_timechange(void);
68062 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
68063 +void gr_log_chdir(const struct dentry *dentry,
68064 + const struct vfsmount *mnt);
68065 +void gr_log_chroot_exec(const struct dentry *dentry,
68066 + const struct vfsmount *mnt);
68067 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
68068 +#ifdef CONFIG_COMPAT
68069 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
68070 +#endif
68071 +void gr_log_remount(const char *devname, const int retval);
68072 +void gr_log_unmount(const char *devname, const int retval);
68073 +void gr_log_mount(const char *from, const char *to, const int retval);
68074 +void gr_log_textrel(struct vm_area_struct *vma);
68075 +void gr_log_rwxmmap(struct file *file);
68076 +void gr_log_rwxmprotect(struct file *file);
68077 +
68078 +int gr_handle_follow_link(const struct inode *parent,
68079 + const struct inode *inode,
68080 + const struct dentry *dentry,
68081 + const struct vfsmount *mnt);
68082 +int gr_handle_fifo(const struct dentry *dentry,
68083 + const struct vfsmount *mnt,
68084 + const struct dentry *dir, const int flag,
68085 + const int acc_mode);
68086 +int gr_handle_hardlink(const struct dentry *dentry,
68087 + const struct vfsmount *mnt,
68088 + struct inode *inode,
68089 + const int mode, const char *to);
68090 +
68091 +int gr_is_capable(const int cap);
68092 +int gr_is_capable_nolog(const int cap);
68093 +void gr_learn_resource(const struct task_struct *task, const int limit,
68094 + const unsigned long wanted, const int gt);
68095 +void gr_copy_label(struct task_struct *tsk);
68096 +void gr_handle_crash(struct task_struct *task, const int sig);
68097 +int gr_handle_signal(const struct task_struct *p, const int sig);
68098 +int gr_check_crash_uid(const uid_t uid);
68099 +int gr_check_protected_task(const struct task_struct *task);
68100 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68101 +int gr_acl_handle_mmap(const struct file *file,
68102 + const unsigned long prot);
68103 +int gr_acl_handle_mprotect(const struct file *file,
68104 + const unsigned long prot);
68105 +int gr_check_hidden_task(const struct task_struct *tsk);
68106 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68107 + const struct vfsmount *mnt);
68108 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
68109 + const struct vfsmount *mnt);
68110 +__u32 gr_acl_handle_access(const struct dentry *dentry,
68111 + const struct vfsmount *mnt, const int fmode);
68112 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68113 + const struct vfsmount *mnt, umode_t *mode);
68114 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
68115 + const struct vfsmount *mnt);
68116 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68117 + const struct vfsmount *mnt);
68118 +int gr_handle_ptrace(struct task_struct *task, const long request);
68119 +int gr_handle_proc_ptrace(struct task_struct *task);
68120 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
68121 + const struct vfsmount *mnt);
68122 +int gr_check_crash_exec(const struct file *filp);
68123 +int gr_acl_is_enabled(void);
68124 +void gr_set_kernel_label(struct task_struct *task);
68125 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
68126 + const gid_t gid);
68127 +int gr_set_proc_label(const struct dentry *dentry,
68128 + const struct vfsmount *mnt,
68129 + const int unsafe_flags);
68130 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68131 + const struct vfsmount *mnt);
68132 +__u32 gr_acl_handle_open(const struct dentry *dentry,
68133 + const struct vfsmount *mnt, int acc_mode);
68134 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
68135 + const struct dentry *p_dentry,
68136 + const struct vfsmount *p_mnt,
68137 + int open_flags, int acc_mode, const int imode);
68138 +void gr_handle_create(const struct dentry *dentry,
68139 + const struct vfsmount *mnt);
68140 +void gr_handle_proc_create(const struct dentry *dentry,
68141 + const struct inode *inode);
68142 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68143 + const struct dentry *parent_dentry,
68144 + const struct vfsmount *parent_mnt,
68145 + const int mode);
68146 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68147 + const struct dentry *parent_dentry,
68148 + const struct vfsmount *parent_mnt);
68149 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68150 + const struct vfsmount *mnt);
68151 +void gr_handle_delete(const ino_t ino, const dev_t dev);
68152 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68153 + const struct vfsmount *mnt);
68154 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68155 + const struct dentry *parent_dentry,
68156 + const struct vfsmount *parent_mnt,
68157 + const char *from);
68158 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68159 + const struct dentry *parent_dentry,
68160 + const struct vfsmount *parent_mnt,
68161 + const struct dentry *old_dentry,
68162 + const struct vfsmount *old_mnt, const char *to);
68163 +int gr_acl_handle_rename(struct dentry *new_dentry,
68164 + struct dentry *parent_dentry,
68165 + const struct vfsmount *parent_mnt,
68166 + struct dentry *old_dentry,
68167 + struct inode *old_parent_inode,
68168 + struct vfsmount *old_mnt, const char *newname);
68169 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68170 + struct dentry *old_dentry,
68171 + struct dentry *new_dentry,
68172 + struct vfsmount *mnt, const __u8 replace);
68173 +__u32 gr_check_link(const struct dentry *new_dentry,
68174 + const struct dentry *parent_dentry,
68175 + const struct vfsmount *parent_mnt,
68176 + const struct dentry *old_dentry,
68177 + const struct vfsmount *old_mnt);
68178 +int gr_acl_handle_filldir(const struct file *file, const char *name,
68179 + const unsigned int namelen, const ino_t ino);
68180 +
68181 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
68182 + const struct vfsmount *mnt);
68183 +void gr_acl_handle_exit(void);
68184 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
68185 +int gr_acl_handle_procpidmem(const struct task_struct *task);
68186 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68187 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68188 +void gr_audit_ptrace(struct task_struct *task);
68189 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68190 +
68191 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68192 +
68193 +#ifdef CONFIG_GRKERNSEC
68194 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68195 +void gr_handle_vm86(void);
68196 +void gr_handle_mem_readwrite(u64 from, u64 to);
68197 +
68198 +void gr_log_badprocpid(const char *entry);
68199 +
68200 +extern int grsec_enable_dmesg;
68201 +extern int grsec_disable_privio;
68202 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68203 +extern int grsec_enable_chroot_findtask;
68204 +#endif
68205 +#ifdef CONFIG_GRKERNSEC_SETXID
68206 +extern int grsec_enable_setxid;
68207 +#endif
68208 +#endif
68209 +
68210 +#endif
68211 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
68212 index 6a87154..a3ce57b 100644
68213 --- a/include/linux/hdpu_features.h
68214 +++ b/include/linux/hdpu_features.h
68215 @@ -3,7 +3,7 @@
68216 struct cpustate_t {
68217 spinlock_t lock;
68218 int excl;
68219 - int open_count;
68220 + atomic_t open_count;
68221 unsigned char cached_val;
68222 int inited;
68223 unsigned long *set_addr;
68224 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68225 index 211ff44..00ab6d7 100644
68226 --- a/include/linux/highmem.h
68227 +++ b/include/linux/highmem.h
68228 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68229 kunmap_atomic(kaddr, KM_USER0);
68230 }
68231
68232 +static inline void sanitize_highpage(struct page *page)
68233 +{
68234 + void *kaddr;
68235 + unsigned long flags;
68236 +
68237 + local_irq_save(flags);
68238 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
68239 + clear_page(kaddr);
68240 + kunmap_atomic(kaddr, KM_CLEARPAGE);
68241 + local_irq_restore(flags);
68242 +}
68243 +
68244 static inline void zero_user_segments(struct page *page,
68245 unsigned start1, unsigned end1,
68246 unsigned start2, unsigned end2)
68247 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68248 index 7b40cda..24eb44e 100644
68249 --- a/include/linux/i2c.h
68250 +++ b/include/linux/i2c.h
68251 @@ -325,6 +325,7 @@ struct i2c_algorithm {
68252 /* To determine what the adapter supports */
68253 u32 (*functionality) (struct i2c_adapter *);
68254 };
68255 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68256
68257 /*
68258 * i2c_adapter is the structure used to identify a physical i2c bus along
68259 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68260 index 4c4e57d..f3c5303 100644
68261 --- a/include/linux/i2o.h
68262 +++ b/include/linux/i2o.h
68263 @@ -564,7 +564,7 @@ struct i2o_controller {
68264 struct i2o_device *exec; /* Executive */
68265 #if BITS_PER_LONG == 64
68266 spinlock_t context_list_lock; /* lock for context_list */
68267 - atomic_t context_list_counter; /* needed for unique contexts */
68268 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68269 struct list_head context_list; /* list of context id's
68270 and pointers */
68271 #endif
68272 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68273 index 21a6f5d..dc42eab 100644
68274 --- a/include/linux/init_task.h
68275 +++ b/include/linux/init_task.h
68276 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
68277 #define INIT_IDS
68278 #endif
68279
68280 +#ifdef CONFIG_X86
68281 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68282 +#else
68283 +#define INIT_TASK_THREAD_INFO
68284 +#endif
68285 +
68286 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68287 /*
68288 * Because of the reduced scope of CAP_SETPCAP when filesystem
68289 @@ -156,6 +162,7 @@ extern struct cred init_cred;
68290 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68291 .comm = "swapper", \
68292 .thread = INIT_THREAD, \
68293 + INIT_TASK_THREAD_INFO \
68294 .fs = &init_fs, \
68295 .files = &init_files, \
68296 .signal = &init_signals, \
68297 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68298 index 4f0a72a..a849599 100644
68299 --- a/include/linux/intel-iommu.h
68300 +++ b/include/linux/intel-iommu.h
68301 @@ -296,7 +296,7 @@ struct iommu_flush {
68302 u8 fm, u64 type);
68303 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68304 unsigned int size_order, u64 type);
68305 -};
68306 +} __no_const;
68307
68308 enum {
68309 SR_DMAR_FECTL_REG,
68310 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68311 index c739150..be577b5 100644
68312 --- a/include/linux/interrupt.h
68313 +++ b/include/linux/interrupt.h
68314 @@ -369,7 +369,7 @@ enum
68315 /* map softirq index to softirq name. update 'softirq_to_name' in
68316 * kernel/softirq.c when adding a new softirq.
68317 */
68318 -extern char *softirq_to_name[NR_SOFTIRQS];
68319 +extern const char * const softirq_to_name[NR_SOFTIRQS];
68320
68321 /* softirq mask and active fields moved to irq_cpustat_t in
68322 * asm/hardirq.h to get better cache usage. KAO
68323 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68324
68325 struct softirq_action
68326 {
68327 - void (*action)(struct softirq_action *);
68328 + void (*action)(void);
68329 };
68330
68331 asmlinkage void do_softirq(void);
68332 asmlinkage void __do_softirq(void);
68333 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68334 +extern void open_softirq(int nr, void (*action)(void));
68335 extern void softirq_init(void);
68336 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68337 extern void raise_softirq_irqoff(unsigned int nr);
68338 diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
68339 index eb73632..19abfc1 100644
68340 --- a/include/linux/iocontext.h
68341 +++ b/include/linux/iocontext.h
68342 @@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
68343 return NULL;
68344 }
68345
68346 +struct task_struct;
68347 #ifdef CONFIG_BLOCK
68348 int put_io_context(struct io_context *ioc);
68349 -void exit_io_context(void);
68350 +void exit_io_context(struct task_struct *task);
68351 struct io_context *get_io_context(gfp_t gfp_flags, int node);
68352 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
68353 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
68354 #else
68355 -static inline void exit_io_context(void)
68356 +static inline void exit_io_context(struct task_struct *task)
68357 {
68358 }
68359
68360 diff --git a/include/linux/irq.h b/include/linux/irq.h
68361 index 9e5f45a..025865b 100644
68362 --- a/include/linux/irq.h
68363 +++ b/include/linux/irq.h
68364 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68365 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68366 bool boot)
68367 {
68368 +#ifdef CONFIG_CPUMASK_OFFSTACK
68369 gfp_t gfp = GFP_ATOMIC;
68370
68371 if (boot)
68372 gfp = GFP_NOWAIT;
68373
68374 -#ifdef CONFIG_CPUMASK_OFFSTACK
68375 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68376 return false;
68377
68378 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68379 index 7922742..27306a2 100644
68380 --- a/include/linux/kallsyms.h
68381 +++ b/include/linux/kallsyms.h
68382 @@ -15,7 +15,8 @@
68383
68384 struct module;
68385
68386 -#ifdef CONFIG_KALLSYMS
68387 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68388 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68389 /* Lookup the address for a symbol. Returns 0 if not found. */
68390 unsigned long kallsyms_lookup_name(const char *name);
68391
68392 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68393 /* Stupid that this does nothing, but I didn't create this mess. */
68394 #define __print_symbol(fmt, addr)
68395 #endif /*CONFIG_KALLSYMS*/
68396 +#else /* when included by kallsyms.c, vsnprintf.c, or
68397 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68398 +extern void __print_symbol(const char *fmt, unsigned long address);
68399 +extern int sprint_symbol(char *buffer, unsigned long address);
68400 +const char *kallsyms_lookup(unsigned long addr,
68401 + unsigned long *symbolsize,
68402 + unsigned long *offset,
68403 + char **modname, char *namebuf);
68404 +#endif
68405
68406 /* This macro allows us to keep printk typechecking */
68407 static void __check_printsym_format(const char *fmt, ...)
68408 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68409 index 6adcc29..13369e8 100644
68410 --- a/include/linux/kgdb.h
68411 +++ b/include/linux/kgdb.h
68412 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68413
68414 extern int kgdb_connected;
68415
68416 -extern atomic_t kgdb_setting_breakpoint;
68417 -extern atomic_t kgdb_cpu_doing_single_step;
68418 +extern atomic_unchecked_t kgdb_setting_breakpoint;
68419 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68420
68421 extern struct task_struct *kgdb_usethread;
68422 extern struct task_struct *kgdb_contthread;
68423 @@ -235,7 +235,7 @@ struct kgdb_arch {
68424 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68425 void (*remove_all_hw_break)(void);
68426 void (*correct_hw_break)(void);
68427 -};
68428 +} __do_const;
68429
68430 /**
68431 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68432 @@ -257,14 +257,14 @@ struct kgdb_io {
68433 int (*init) (void);
68434 void (*pre_exception) (void);
68435 void (*post_exception) (void);
68436 -};
68437 +} __do_const;
68438
68439 -extern struct kgdb_arch arch_kgdb_ops;
68440 +extern const struct kgdb_arch arch_kgdb_ops;
68441
68442 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68443
68444 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68445 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68446 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68447 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68448
68449 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68450 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68451 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68452 index 0546fe7..2a22bc1 100644
68453 --- a/include/linux/kmod.h
68454 +++ b/include/linux/kmod.h
68455 @@ -31,6 +31,8 @@
68456 * usually useless though. */
68457 extern int __request_module(bool wait, const char *name, ...) \
68458 __attribute__((format(printf, 2, 3)));
68459 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68460 + __attribute__((format(printf, 3, 4)));
68461 #define request_module(mod...) __request_module(true, mod)
68462 #define request_module_nowait(mod...) __request_module(false, mod)
68463 #define try_then_request_module(x, mod...) \
68464 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68465 index 58ae8e0..3950d3c 100644
68466 --- a/include/linux/kobject.h
68467 +++ b/include/linux/kobject.h
68468 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
68469
68470 struct kobj_type {
68471 void (*release)(struct kobject *kobj);
68472 - struct sysfs_ops *sysfs_ops;
68473 + const struct sysfs_ops *sysfs_ops;
68474 struct attribute **default_attrs;
68475 };
68476
68477 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
68478 };
68479
68480 struct kset_uevent_ops {
68481 - int (*filter)(struct kset *kset, struct kobject *kobj);
68482 - const char *(*name)(struct kset *kset, struct kobject *kobj);
68483 - int (*uevent)(struct kset *kset, struct kobject *kobj,
68484 + int (* const filter)(struct kset *kset, struct kobject *kobj);
68485 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
68486 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
68487 struct kobj_uevent_env *env);
68488 };
68489
68490 @@ -132,7 +132,7 @@ struct kobj_attribute {
68491 const char *buf, size_t count);
68492 };
68493
68494 -extern struct sysfs_ops kobj_sysfs_ops;
68495 +extern const struct sysfs_ops kobj_sysfs_ops;
68496
68497 /**
68498 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
68499 @@ -155,14 +155,14 @@ struct kset {
68500 struct list_head list;
68501 spinlock_t list_lock;
68502 struct kobject kobj;
68503 - struct kset_uevent_ops *uevent_ops;
68504 + const struct kset_uevent_ops *uevent_ops;
68505 };
68506
68507 extern void kset_init(struct kset *kset);
68508 extern int __must_check kset_register(struct kset *kset);
68509 extern void kset_unregister(struct kset *kset);
68510 extern struct kset * __must_check kset_create_and_add(const char *name,
68511 - struct kset_uevent_ops *u,
68512 + const struct kset_uevent_ops *u,
68513 struct kobject *parent_kobj);
68514
68515 static inline struct kset *to_kset(struct kobject *kobj)
68516 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68517 index c728a50..752d821 100644
68518 --- a/include/linux/kvm_host.h
68519 +++ b/include/linux/kvm_host.h
68520 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68521 void vcpu_load(struct kvm_vcpu *vcpu);
68522 void vcpu_put(struct kvm_vcpu *vcpu);
68523
68524 -int kvm_init(void *opaque, unsigned int vcpu_size,
68525 +int kvm_init(const void *opaque, unsigned int vcpu_size,
68526 struct module *module);
68527 void kvm_exit(void);
68528
68529 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68530 struct kvm_guest_debug *dbg);
68531 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68532
68533 -int kvm_arch_init(void *opaque);
68534 +int kvm_arch_init(const void *opaque);
68535 void kvm_arch_exit(void);
68536
68537 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68538 diff --git a/include/linux/libata.h b/include/linux/libata.h
68539 index a069916..223edde 100644
68540 --- a/include/linux/libata.h
68541 +++ b/include/linux/libata.h
68542 @@ -525,11 +525,11 @@ struct ata_ioports {
68543
68544 struct ata_host {
68545 spinlock_t lock;
68546 - struct device *dev;
68547 + struct device *dev;
68548 void __iomem * const *iomap;
68549 unsigned int n_ports;
68550 void *private_data;
68551 - struct ata_port_operations *ops;
68552 + const struct ata_port_operations *ops;
68553 unsigned long flags;
68554 #ifdef CONFIG_ATA_ACPI
68555 acpi_handle acpi_handle;
68556 @@ -710,7 +710,7 @@ struct ata_link {
68557
68558 struct ata_port {
68559 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68560 - struct ata_port_operations *ops;
68561 + const struct ata_port_operations *ops;
68562 spinlock_t *lock;
68563 /* Flags owned by the EH context. Only EH should touch these once the
68564 port is active */
68565 @@ -884,7 +884,7 @@ struct ata_port_operations {
68566 * fields must be pointers.
68567 */
68568 const struct ata_port_operations *inherits;
68569 -};
68570 +} __do_const;
68571
68572 struct ata_port_info {
68573 unsigned long flags;
68574 @@ -892,7 +892,7 @@ struct ata_port_info {
68575 unsigned long pio_mask;
68576 unsigned long mwdma_mask;
68577 unsigned long udma_mask;
68578 - struct ata_port_operations *port_ops;
68579 + const struct ata_port_operations *port_ops;
68580 void *private_data;
68581 };
68582
68583 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68584 extern const unsigned long sata_deb_timing_hotplug[];
68585 extern const unsigned long sata_deb_timing_long[];
68586
68587 -extern struct ata_port_operations ata_dummy_port_ops;
68588 +extern const struct ata_port_operations ata_dummy_port_ops;
68589 extern const struct ata_port_info ata_dummy_port_info;
68590
68591 static inline const unsigned long *
68592 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68593 struct scsi_host_template *sht);
68594 extern void ata_host_detach(struct ata_host *host);
68595 extern void ata_host_init(struct ata_host *, struct device *,
68596 - unsigned long, struct ata_port_operations *);
68597 + unsigned long, const struct ata_port_operations *);
68598 extern int ata_scsi_detect(struct scsi_host_template *sht);
68599 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68600 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68601 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68602 index fbc48f8..0886e57 100644
68603 --- a/include/linux/lockd/bind.h
68604 +++ b/include/linux/lockd/bind.h
68605 @@ -23,13 +23,13 @@ struct svc_rqst;
68606 * This is the set of functions for lockd->nfsd communication
68607 */
68608 struct nlmsvc_binding {
68609 - __be32 (*fopen)(struct svc_rqst *,
68610 + __be32 (* const fopen)(struct svc_rqst *,
68611 struct nfs_fh *,
68612 struct file **);
68613 - void (*fclose)(struct file *);
68614 + void (* const fclose)(struct file *);
68615 };
68616
68617 -extern struct nlmsvc_binding * nlmsvc_ops;
68618 +extern const struct nlmsvc_binding * nlmsvc_ops;
68619
68620 /*
68621 * Similar to nfs_client_initdata, but without the NFS-specific
68622 diff --git a/include/linux/mca.h b/include/linux/mca.h
68623 index 3797270..7765ede 100644
68624 --- a/include/linux/mca.h
68625 +++ b/include/linux/mca.h
68626 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68627 int region);
68628 void * (*mca_transform_memory)(struct mca_device *,
68629 void *memory);
68630 -};
68631 +} __no_const;
68632
68633 struct mca_bus {
68634 u64 default_dma_mask;
68635 diff --git a/include/linux/memory.h b/include/linux/memory.h
68636 index 37fa19b..b597c85 100644
68637 --- a/include/linux/memory.h
68638 +++ b/include/linux/memory.h
68639 @@ -108,7 +108,7 @@ struct memory_accessor {
68640 size_t count);
68641 ssize_t (*write)(struct memory_accessor *, const char *buf,
68642 off_t offset, size_t count);
68643 -};
68644 +} __no_const;
68645
68646 /*
68647 * Kernel text modification mutex, used for code patching. Users of this lock
68648 diff --git a/include/linux/mm.h b/include/linux/mm.h
68649 index 11e5be6..1ff2423 100644
68650 --- a/include/linux/mm.h
68651 +++ b/include/linux/mm.h
68652 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68653
68654 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68655 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68656 +
68657 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68658 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68659 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68660 +#else
68661 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68662 +#endif
68663 +
68664 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68665 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68666
68667 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68668 int set_page_dirty_lock(struct page *page);
68669 int clear_page_dirty_for_io(struct page *page);
68670
68671 -/* Is the vma a continuation of the stack vma above it? */
68672 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68673 -{
68674 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68675 -}
68676 -
68677 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68678 unsigned long old_addr, struct vm_area_struct *new_vma,
68679 unsigned long new_addr, unsigned long len);
68680 @@ -890,6 +891,8 @@ struct shrinker {
68681 extern void register_shrinker(struct shrinker *);
68682 extern void unregister_shrinker(struct shrinker *);
68683
68684 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
68685 +
68686 int vma_wants_writenotify(struct vm_area_struct *vma);
68687
68688 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68689 @@ -1162,6 +1165,7 @@ out:
68690 }
68691
68692 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68693 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68694
68695 extern unsigned long do_brk(unsigned long, unsigned long);
68696
68697 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68698 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68699 struct vm_area_struct **pprev);
68700
68701 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68702 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68703 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68704 +
68705 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68706 NULL if none. Assume start_addr < end_addr. */
68707 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68708 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68709 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68710 }
68711
68712 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
68713 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68714 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68715 unsigned long pfn, unsigned long size, pgprot_t);
68716 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68717 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68718 extern int sysctl_memory_failure_early_kill;
68719 extern int sysctl_memory_failure_recovery;
68720 -extern atomic_long_t mce_bad_pages;
68721 +extern atomic_long_unchecked_t mce_bad_pages;
68722 +
68723 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68724 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68725 +#else
68726 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68727 +#endif
68728
68729 #endif /* __KERNEL__ */
68730 #endif /* _LINUX_MM_H */
68731 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68732 index 9d12ed5..6d9707a 100644
68733 --- a/include/linux/mm_types.h
68734 +++ b/include/linux/mm_types.h
68735 @@ -186,6 +186,8 @@ struct vm_area_struct {
68736 #ifdef CONFIG_NUMA
68737 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68738 #endif
68739 +
68740 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68741 };
68742
68743 struct core_thread {
68744 @@ -287,6 +289,24 @@ struct mm_struct {
68745 #ifdef CONFIG_MMU_NOTIFIER
68746 struct mmu_notifier_mm *mmu_notifier_mm;
68747 #endif
68748 +
68749 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68750 + unsigned long pax_flags;
68751 +#endif
68752 +
68753 +#ifdef CONFIG_PAX_DLRESOLVE
68754 + unsigned long call_dl_resolve;
68755 +#endif
68756 +
68757 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68758 + unsigned long call_syscall;
68759 +#endif
68760 +
68761 +#ifdef CONFIG_PAX_ASLR
68762 + unsigned long delta_mmap; /* randomized offset */
68763 + unsigned long delta_stack; /* randomized offset */
68764 +#endif
68765 +
68766 };
68767
68768 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68769 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68770 index 4e02ee2..afb159e 100644
68771 --- a/include/linux/mmu_notifier.h
68772 +++ b/include/linux/mmu_notifier.h
68773 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68774 */
68775 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68776 ({ \
68777 - pte_t __pte; \
68778 + pte_t ___pte; \
68779 struct vm_area_struct *___vma = __vma; \
68780 unsigned long ___address = __address; \
68781 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68782 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68783 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68784 - __pte; \
68785 + ___pte; \
68786 })
68787
68788 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68789 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68790 index 6c31a2a..4b0e930 100644
68791 --- a/include/linux/mmzone.h
68792 +++ b/include/linux/mmzone.h
68793 @@ -350,7 +350,7 @@ struct zone {
68794 unsigned long flags; /* zone flags, see below */
68795
68796 /* Zone statistics */
68797 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68798 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68799
68800 /*
68801 * prev_priority holds the scanning priority for this zone. It is
68802 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68803 index f58e9d8..3503935 100644
68804 --- a/include/linux/mod_devicetable.h
68805 +++ b/include/linux/mod_devicetable.h
68806 @@ -12,7 +12,7 @@
68807 typedef unsigned long kernel_ulong_t;
68808 #endif
68809
68810 -#define PCI_ANY_ID (~0)
68811 +#define PCI_ANY_ID ((__u16)~0)
68812
68813 struct pci_device_id {
68814 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68815 @@ -131,7 +131,7 @@ struct usb_device_id {
68816 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68817 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68818
68819 -#define HID_ANY_ID (~0)
68820 +#define HID_ANY_ID (~0U)
68821
68822 struct hid_device_id {
68823 __u16 bus;
68824 diff --git a/include/linux/module.h b/include/linux/module.h
68825 index 482efc8..642032b 100644
68826 --- a/include/linux/module.h
68827 +++ b/include/linux/module.h
68828 @@ -16,6 +16,7 @@
68829 #include <linux/kobject.h>
68830 #include <linux/moduleparam.h>
68831 #include <linux/tracepoint.h>
68832 +#include <linux/fs.h>
68833
68834 #include <asm/local.h>
68835 #include <asm/module.h>
68836 @@ -287,16 +288,16 @@ struct module
68837 int (*init)(void);
68838
68839 /* If this is non-NULL, vfree after init() returns */
68840 - void *module_init;
68841 + void *module_init_rx, *module_init_rw;
68842
68843 /* Here is the actual code + data, vfree'd on unload. */
68844 - void *module_core;
68845 + void *module_core_rx, *module_core_rw;
68846
68847 /* Here are the sizes of the init and core sections */
68848 - unsigned int init_size, core_size;
68849 + unsigned int init_size_rw, core_size_rw;
68850
68851 /* The size of the executable code in each section. */
68852 - unsigned int init_text_size, core_text_size;
68853 + unsigned int init_size_rx, core_size_rx;
68854
68855 /* Arch-specific module values */
68856 struct mod_arch_specific arch;
68857 @@ -345,6 +346,10 @@ struct module
68858 #ifdef CONFIG_EVENT_TRACING
68859 struct ftrace_event_call *trace_events;
68860 unsigned int num_trace_events;
68861 + struct file_operations trace_id;
68862 + struct file_operations trace_enable;
68863 + struct file_operations trace_format;
68864 + struct file_operations trace_filter;
68865 #endif
68866 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68867 unsigned long *ftrace_callsites;
68868 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68869 bool is_module_address(unsigned long addr);
68870 bool is_module_text_address(unsigned long addr);
68871
68872 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68873 +{
68874 +
68875 +#ifdef CONFIG_PAX_KERNEXEC
68876 + if (ktla_ktva(addr) >= (unsigned long)start &&
68877 + ktla_ktva(addr) < (unsigned long)start + size)
68878 + return 1;
68879 +#endif
68880 +
68881 + return ((void *)addr >= start && (void *)addr < start + size);
68882 +}
68883 +
68884 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68885 +{
68886 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68887 +}
68888 +
68889 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68890 +{
68891 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68892 +}
68893 +
68894 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68895 +{
68896 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68897 +}
68898 +
68899 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68900 +{
68901 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68902 +}
68903 +
68904 static inline int within_module_core(unsigned long addr, struct module *mod)
68905 {
68906 - return (unsigned long)mod->module_core <= addr &&
68907 - addr < (unsigned long)mod->module_core + mod->core_size;
68908 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68909 }
68910
68911 static inline int within_module_init(unsigned long addr, struct module *mod)
68912 {
68913 - return (unsigned long)mod->module_init <= addr &&
68914 - addr < (unsigned long)mod->module_init + mod->init_size;
68915 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68916 }
68917
68918 /* Search for module by name: must hold module_mutex. */
68919 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68920 index c1f40c2..682ca53 100644
68921 --- a/include/linux/moduleloader.h
68922 +++ b/include/linux/moduleloader.h
68923 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68924 sections. Returns NULL on failure. */
68925 void *module_alloc(unsigned long size);
68926
68927 +#ifdef CONFIG_PAX_KERNEXEC
68928 +void *module_alloc_exec(unsigned long size);
68929 +#else
68930 +#define module_alloc_exec(x) module_alloc(x)
68931 +#endif
68932 +
68933 /* Free memory returned from module_alloc. */
68934 void module_free(struct module *mod, void *module_region);
68935
68936 +#ifdef CONFIG_PAX_KERNEXEC
68937 +void module_free_exec(struct module *mod, void *module_region);
68938 +#else
68939 +#define module_free_exec(x, y) module_free((x), (y))
68940 +#endif
68941 +
68942 /* Apply the given relocation to the (simplified) ELF. Return -error
68943 or 0. */
68944 int apply_relocate(Elf_Shdr *sechdrs,
68945 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68946 index 82a9124..8a5f622 100644
68947 --- a/include/linux/moduleparam.h
68948 +++ b/include/linux/moduleparam.h
68949 @@ -132,7 +132,7 @@ struct kparam_array
68950
68951 /* Actually copy string: maxlen param is usually sizeof(string). */
68952 #define module_param_string(name, string, len, perm) \
68953 - static const struct kparam_string __param_string_##name \
68954 + static const struct kparam_string __param_string_##name __used \
68955 = { len, string }; \
68956 __module_param_call(MODULE_PARAM_PREFIX, name, \
68957 param_set_copystring, param_get_string, \
68958 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68959
68960 /* Comma-separated array: *nump is set to number they actually specified. */
68961 #define module_param_array_named(name, array, type, nump, perm) \
68962 - static const struct kparam_array __param_arr_##name \
68963 + static const struct kparam_array __param_arr_##name __used \
68964 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68965 sizeof(array[0]), array }; \
68966 __module_param_call(MODULE_PARAM_PREFIX, name, \
68967 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68968 index 878cab4..c92cb3e 100644
68969 --- a/include/linux/mutex.h
68970 +++ b/include/linux/mutex.h
68971 @@ -51,7 +51,7 @@ struct mutex {
68972 spinlock_t wait_lock;
68973 struct list_head wait_list;
68974 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68975 - struct thread_info *owner;
68976 + struct task_struct *owner;
68977 #endif
68978 #ifdef CONFIG_DEBUG_MUTEXES
68979 const char *name;
68980 diff --git a/include/linux/namei.h b/include/linux/namei.h
68981 index ec0f607..d19e675 100644
68982 --- a/include/linux/namei.h
68983 +++ b/include/linux/namei.h
68984 @@ -22,7 +22,7 @@ struct nameidata {
68985 unsigned int flags;
68986 int last_type;
68987 unsigned depth;
68988 - char *saved_names[MAX_NESTED_LINKS + 1];
68989 + const char *saved_names[MAX_NESTED_LINKS + 1];
68990
68991 /* Intent data */
68992 union {
68993 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68994 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68995 extern void unlock_rename(struct dentry *, struct dentry *);
68996
68997 -static inline void nd_set_link(struct nameidata *nd, char *path)
68998 +static inline void nd_set_link(struct nameidata *nd, const char *path)
68999 {
69000 nd->saved_names[nd->depth] = path;
69001 }
69002
69003 -static inline char *nd_get_link(struct nameidata *nd)
69004 +static inline const char *nd_get_link(const struct nameidata *nd)
69005 {
69006 return nd->saved_names[nd->depth];
69007 }
69008 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
69009 index 9d7e8f7..04428c5 100644
69010 --- a/include/linux/netdevice.h
69011 +++ b/include/linux/netdevice.h
69012 @@ -637,6 +637,7 @@ struct net_device_ops {
69013 u16 xid);
69014 #endif
69015 };
69016 +typedef struct net_device_ops __no_const net_device_ops_no_const;
69017
69018 /*
69019 * The DEVICE structure.
69020 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
69021 new file mode 100644
69022 index 0000000..33f4af8
69023 --- /dev/null
69024 +++ b/include/linux/netfilter/xt_gradm.h
69025 @@ -0,0 +1,9 @@
69026 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
69027 +#define _LINUX_NETFILTER_XT_GRADM_H 1
69028 +
69029 +struct xt_gradm_mtinfo {
69030 + __u16 flags;
69031 + __u16 invflags;
69032 +};
69033 +
69034 +#endif
69035 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
69036 index b359c4a..c08b334 100644
69037 --- a/include/linux/nodemask.h
69038 +++ b/include/linux/nodemask.h
69039 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
69040
69041 #define any_online_node(mask) \
69042 ({ \
69043 - int node; \
69044 - for_each_node_mask(node, (mask)) \
69045 - if (node_online(node)) \
69046 + int __node; \
69047 + for_each_node_mask(__node, (mask)) \
69048 + if (node_online(__node)) \
69049 break; \
69050 - node; \
69051 + __node; \
69052 })
69053
69054 #define num_online_nodes() num_node_state(N_ONLINE)
69055 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
69056 index 5171639..7cf4235 100644
69057 --- a/include/linux/oprofile.h
69058 +++ b/include/linux/oprofile.h
69059 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69060 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69061 char const * name, ulong * val);
69062
69063 -/** Create a file for read-only access to an atomic_t. */
69064 +/** Create a file for read-only access to an atomic_unchecked_t. */
69065 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69066 - char const * name, atomic_t * val);
69067 + char const * name, atomic_unchecked_t * val);
69068
69069 /** create a directory */
69070 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69071 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
69072 index 3c62ed4..8924c7c 100644
69073 --- a/include/linux/pagemap.h
69074 +++ b/include/linux/pagemap.h
69075 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
69076 if (((unsigned long)uaddr & PAGE_MASK) !=
69077 ((unsigned long)end & PAGE_MASK))
69078 ret = __get_user(c, end);
69079 + (void)c;
69080 }
69081 + (void)c;
69082 return ret;
69083 }
69084
69085 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69086 index 81c9689..a567a55 100644
69087 --- a/include/linux/perf_event.h
69088 +++ b/include/linux/perf_event.h
69089 @@ -476,7 +476,7 @@ struct hw_perf_event {
69090 struct hrtimer hrtimer;
69091 };
69092 };
69093 - atomic64_t prev_count;
69094 + atomic64_unchecked_t prev_count;
69095 u64 sample_period;
69096 u64 last_period;
69097 atomic64_t period_left;
69098 @@ -557,7 +557,7 @@ struct perf_event {
69099 const struct pmu *pmu;
69100
69101 enum perf_event_active_state state;
69102 - atomic64_t count;
69103 + atomic64_unchecked_t count;
69104
69105 /*
69106 * These are the total time in nanoseconds that the event
69107 @@ -595,8 +595,8 @@ struct perf_event {
69108 * These accumulate total time (in nanoseconds) that children
69109 * events have been enabled and running, respectively.
69110 */
69111 - atomic64_t child_total_time_enabled;
69112 - atomic64_t child_total_time_running;
69113 + atomic64_unchecked_t child_total_time_enabled;
69114 + atomic64_unchecked_t child_total_time_running;
69115
69116 /*
69117 * Protect attach/detach and child_list:
69118 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
69119 index b43a9e0..b77d869 100644
69120 --- a/include/linux/pipe_fs_i.h
69121 +++ b/include/linux/pipe_fs_i.h
69122 @@ -46,9 +46,9 @@ struct pipe_inode_info {
69123 wait_queue_head_t wait;
69124 unsigned int nrbufs, curbuf;
69125 struct page *tmp_page;
69126 - unsigned int readers;
69127 - unsigned int writers;
69128 - unsigned int waiting_writers;
69129 + atomic_t readers;
69130 + atomic_t writers;
69131 + atomic_t waiting_writers;
69132 unsigned int r_counter;
69133 unsigned int w_counter;
69134 struct fasync_struct *fasync_readers;
69135 diff --git a/include/linux/poison.h b/include/linux/poison.h
69136 index 34066ff..e95d744 100644
69137 --- a/include/linux/poison.h
69138 +++ b/include/linux/poison.h
69139 @@ -19,8 +19,8 @@
69140 * under normal circumstances, used to verify that nobody uses
69141 * non-initialized list entries.
69142 */
69143 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
69144 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
69145 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
69146 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
69147
69148 /********** include/linux/timer.h **********/
69149 /*
69150 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
69151 index 4f71bf4..cd2f68e 100644
69152 --- a/include/linux/posix-timers.h
69153 +++ b/include/linux/posix-timers.h
69154 @@ -82,7 +82,8 @@ struct k_clock {
69155 #define TIMER_RETRY 1
69156 void (*timer_get) (struct k_itimer * timr,
69157 struct itimerspec * cur_setting);
69158 -};
69159 +} __do_const;
69160 +typedef struct k_clock __no_const k_clock_no_const;
69161
69162 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
69163
69164 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
69165 index 72b1a10..13303a9 100644
69166 --- a/include/linux/preempt.h
69167 +++ b/include/linux/preempt.h
69168 @@ -110,7 +110,7 @@ struct preempt_ops {
69169 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
69170 void (*sched_out)(struct preempt_notifier *notifier,
69171 struct task_struct *next);
69172 -};
69173 +} __no_const;
69174
69175 /**
69176 * preempt_notifier - key for installing preemption notifiers
69177 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
69178 index 379eaed..1bf73e3 100644
69179 --- a/include/linux/proc_fs.h
69180 +++ b/include/linux/proc_fs.h
69181 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
69182 return proc_create_data(name, mode, parent, proc_fops, NULL);
69183 }
69184
69185 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
69186 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
69187 +{
69188 +#ifdef CONFIG_GRKERNSEC_PROC_USER
69189 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
69190 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69191 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
69192 +#else
69193 + return proc_create_data(name, mode, parent, proc_fops, NULL);
69194 +#endif
69195 +}
69196 +
69197 +
69198 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
69199 mode_t mode, struct proc_dir_entry *base,
69200 read_proc_t *read_proc, void * data)
69201 @@ -256,7 +269,7 @@ union proc_op {
69202 int (*proc_show)(struct seq_file *m,
69203 struct pid_namespace *ns, struct pid *pid,
69204 struct task_struct *task);
69205 -};
69206 +} __no_const;
69207
69208 struct ctl_table_header;
69209 struct ctl_table;
69210 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
69211 index 7456d7d..6c1cfc9 100644
69212 --- a/include/linux/ptrace.h
69213 +++ b/include/linux/ptrace.h
69214 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
69215 extern void exit_ptrace(struct task_struct *tracer);
69216 #define PTRACE_MODE_READ 1
69217 #define PTRACE_MODE_ATTACH 2
69218 -/* Returns 0 on success, -errno on denial. */
69219 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
69220 /* Returns true on success, false on denial. */
69221 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
69222 +/* Returns true on success, false on denial. */
69223 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
69224
69225 static inline int ptrace_reparented(struct task_struct *child)
69226 {
69227 diff --git a/include/linux/random.h b/include/linux/random.h
69228 index 2948046..3262567 100644
69229 --- a/include/linux/random.h
69230 +++ b/include/linux/random.h
69231 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
69232 u32 random32(void);
69233 void srandom32(u32 seed);
69234
69235 +static inline unsigned long pax_get_random_long(void)
69236 +{
69237 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
69238 +}
69239 +
69240 #endif /* __KERNEL___ */
69241
69242 #endif /* _LINUX_RANDOM_H */
69243 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69244 index 988e55f..17cb4ef 100644
69245 --- a/include/linux/reboot.h
69246 +++ b/include/linux/reboot.h
69247 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69248 * Architecture-specific implementations of sys_reboot commands.
69249 */
69250
69251 -extern void machine_restart(char *cmd);
69252 -extern void machine_halt(void);
69253 -extern void machine_power_off(void);
69254 +extern void machine_restart(char *cmd) __noreturn;
69255 +extern void machine_halt(void) __noreturn;
69256 +extern void machine_power_off(void) __noreturn;
69257
69258 extern void machine_shutdown(void);
69259 struct pt_regs;
69260 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69261 */
69262
69263 extern void kernel_restart_prepare(char *cmd);
69264 -extern void kernel_restart(char *cmd);
69265 -extern void kernel_halt(void);
69266 -extern void kernel_power_off(void);
69267 +extern void kernel_restart(char *cmd) __noreturn;
69268 +extern void kernel_halt(void) __noreturn;
69269 +extern void kernel_power_off(void) __noreturn;
69270
69271 void ctrl_alt_del(void);
69272
69273 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69274 * Emergency restart, callable from an interrupt handler.
69275 */
69276
69277 -extern void emergency_restart(void);
69278 +extern void emergency_restart(void) __noreturn;
69279 #include <asm/emergency-restart.h>
69280
69281 #endif
69282 diff --git a/include/linux/regset.h b/include/linux/regset.h
69283 index 8abee65..5150fd1 100644
69284 --- a/include/linux/regset.h
69285 +++ b/include/linux/regset.h
69286 @@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
69287 {
69288 const struct user_regset *regset = &view->regsets[setno];
69289
69290 + if (!regset->get)
69291 + return -EOPNOTSUPP;
69292 +
69293 if (!access_ok(VERIFY_WRITE, data, size))
69294 return -EIO;
69295
69296 @@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
69297 {
69298 const struct user_regset *regset = &view->regsets[setno];
69299
69300 + if (!regset->set)
69301 + return -EOPNOTSUPP;
69302 +
69303 if (!access_ok(VERIFY_READ, data, size))
69304 return -EIO;
69305
69306 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69307 index dd31e7b..5b03c5c 100644
69308 --- a/include/linux/reiserfs_fs.h
69309 +++ b/include/linux/reiserfs_fs.h
69310 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69311 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69312
69313 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69314 -#define get_generation(s) atomic_read (&fs_generation(s))
69315 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69316 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69317 #define __fs_changed(gen,s) (gen != get_generation (s))
69318 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69319 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69320 */
69321
69322 struct item_operations {
69323 - int (*bytes_number) (struct item_head * ih, int block_size);
69324 - void (*decrement_key) (struct cpu_key *);
69325 - int (*is_left_mergeable) (struct reiserfs_key * ih,
69326 + int (* const bytes_number) (struct item_head * ih, int block_size);
69327 + void (* const decrement_key) (struct cpu_key *);
69328 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
69329 unsigned long bsize);
69330 - void (*print_item) (struct item_head *, char *item);
69331 - void (*check_item) (struct item_head *, char *item);
69332 + void (* const print_item) (struct item_head *, char *item);
69333 + void (* const check_item) (struct item_head *, char *item);
69334
69335 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69336 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69337 int is_affected, int insert_size);
69338 - int (*check_left) (struct virtual_item * vi, int free,
69339 + int (* const check_left) (struct virtual_item * vi, int free,
69340 int start_skip, int end_skip);
69341 - int (*check_right) (struct virtual_item * vi, int free);
69342 - int (*part_size) (struct virtual_item * vi, int from, int to);
69343 - int (*unit_num) (struct virtual_item * vi);
69344 - void (*print_vi) (struct virtual_item * vi);
69345 + int (* const check_right) (struct virtual_item * vi, int free);
69346 + int (* const part_size) (struct virtual_item * vi, int from, int to);
69347 + int (* const unit_num) (struct virtual_item * vi);
69348 + void (* const print_vi) (struct virtual_item * vi);
69349 };
69350
69351 -extern struct item_operations *item_ops[TYPE_ANY + 1];
69352 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69353
69354 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69355 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69356 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69357 index dab68bb..0688727 100644
69358 --- a/include/linux/reiserfs_fs_sb.h
69359 +++ b/include/linux/reiserfs_fs_sb.h
69360 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69361 /* Comment? -Hans */
69362 wait_queue_head_t s_wait;
69363 /* To be obsoleted soon by per buffer seals.. -Hans */
69364 - atomic_t s_generation_counter; // increased by one every time the
69365 + atomic_unchecked_t s_generation_counter; // increased by one every time the
69366 // tree gets re-balanced
69367 unsigned long s_properties; /* File system properties. Currently holds
69368 on-disk FS format */
69369 diff --git a/include/linux/relay.h b/include/linux/relay.h
69370 index 14a86bc..17d0700 100644
69371 --- a/include/linux/relay.h
69372 +++ b/include/linux/relay.h
69373 @@ -159,7 +159,7 @@ struct rchan_callbacks
69374 * The callback should return 0 if successful, negative if not.
69375 */
69376 int (*remove_buf_file)(struct dentry *dentry);
69377 -};
69378 +} __no_const;
69379
69380 /*
69381 * CONFIG_RELAY kernel API, kernel/relay.c
69382 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69383 index 3392c59..a746428 100644
69384 --- a/include/linux/rfkill.h
69385 +++ b/include/linux/rfkill.h
69386 @@ -144,6 +144,7 @@ struct rfkill_ops {
69387 void (*query)(struct rfkill *rfkill, void *data);
69388 int (*set_block)(void *data, bool blocked);
69389 };
69390 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69391
69392 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69393 /**
69394 diff --git a/include/linux/sched.h b/include/linux/sched.h
69395 index 71849bf..2ef383dc3 100644
69396 --- a/include/linux/sched.h
69397 +++ b/include/linux/sched.h
69398 @@ -101,6 +101,7 @@ struct bio;
69399 struct fs_struct;
69400 struct bts_context;
69401 struct perf_event_context;
69402 +struct linux_binprm;
69403
69404 /*
69405 * List of flags we want to share for kernel threads,
69406 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69407 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69408 asmlinkage void __schedule(void);
69409 asmlinkage void schedule(void);
69410 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69411 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69412
69413 struct nsproxy;
69414 struct user_namespace;
69415 @@ -371,9 +372,12 @@ struct user_namespace;
69416 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69417
69418 extern int sysctl_max_map_count;
69419 +extern unsigned long sysctl_heap_stack_gap;
69420
69421 #include <linux/aio.h>
69422
69423 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69424 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69425 extern unsigned long
69426 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69427 unsigned long, unsigned long);
69428 @@ -666,6 +670,16 @@ struct signal_struct {
69429 struct tty_audit_buf *tty_audit_buf;
69430 #endif
69431
69432 +#ifdef CONFIG_GRKERNSEC
69433 + u32 curr_ip;
69434 + u32 saved_ip;
69435 + u32 gr_saddr;
69436 + u32 gr_daddr;
69437 + u16 gr_sport;
69438 + u16 gr_dport;
69439 + u8 used_accept:1;
69440 +#endif
69441 +
69442 int oom_adj; /* OOM kill score adjustment (bit shift) */
69443 };
69444
69445 @@ -723,6 +737,11 @@ struct user_struct {
69446 struct key *session_keyring; /* UID's default session keyring */
69447 #endif
69448
69449 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69450 + unsigned int banned;
69451 + unsigned long ban_expires;
69452 +#endif
69453 +
69454 /* Hash table maintenance information */
69455 struct hlist_node uidhash_node;
69456 uid_t uid;
69457 @@ -1328,8 +1347,8 @@ struct task_struct {
69458 struct list_head thread_group;
69459
69460 struct completion *vfork_done; /* for vfork() */
69461 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69462 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69463 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69464 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69465
69466 cputime_t utime, stime, utimescaled, stimescaled;
69467 cputime_t gtime;
69468 @@ -1343,16 +1362,6 @@ struct task_struct {
69469 struct task_cputime cputime_expires;
69470 struct list_head cpu_timers[3];
69471
69472 -/* process credentials */
69473 - const struct cred *real_cred; /* objective and real subjective task
69474 - * credentials (COW) */
69475 - const struct cred *cred; /* effective (overridable) subjective task
69476 - * credentials (COW) */
69477 - struct mutex cred_guard_mutex; /* guard against foreign influences on
69478 - * credential calculations
69479 - * (notably. ptrace) */
69480 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69481 -
69482 char comm[TASK_COMM_LEN]; /* executable name excluding path
69483 - access with [gs]et_task_comm (which lock
69484 it with task_lock())
69485 @@ -1369,6 +1378,10 @@ struct task_struct {
69486 #endif
69487 /* CPU-specific state of this task */
69488 struct thread_struct thread;
69489 +/* thread_info moved to task_struct */
69490 +#ifdef CONFIG_X86
69491 + struct thread_info tinfo;
69492 +#endif
69493 /* filesystem information */
69494 struct fs_struct *fs;
69495 /* open file information */
69496 @@ -1436,6 +1449,15 @@ struct task_struct {
69497 int hardirq_context;
69498 int softirq_context;
69499 #endif
69500 +
69501 +/* process credentials */
69502 + const struct cred *real_cred; /* objective and real subjective task
69503 + * credentials (COW) */
69504 + struct mutex cred_guard_mutex; /* guard against foreign influences on
69505 + * credential calculations
69506 + * (notably. ptrace) */
69507 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69508 +
69509 #ifdef CONFIG_LOCKDEP
69510 # define MAX_LOCK_DEPTH 48UL
69511 u64 curr_chain_key;
69512 @@ -1456,6 +1478,9 @@ struct task_struct {
69513
69514 struct backing_dev_info *backing_dev_info;
69515
69516 + const struct cred *cred; /* effective (overridable) subjective task
69517 + * credentials (COW) */
69518 +
69519 struct io_context *io_context;
69520
69521 unsigned long ptrace_message;
69522 @@ -1519,6 +1544,27 @@ struct task_struct {
69523 unsigned long default_timer_slack_ns;
69524
69525 struct list_head *scm_work_list;
69526 +
69527 +#ifdef CONFIG_GRKERNSEC
69528 + /* grsecurity */
69529 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69530 + u64 exec_id;
69531 +#endif
69532 +#ifdef CONFIG_GRKERNSEC_SETXID
69533 + const struct cred *delayed_cred;
69534 +#endif
69535 + struct dentry *gr_chroot_dentry;
69536 + struct acl_subject_label *acl;
69537 + struct acl_role_label *role;
69538 + struct file *exec_file;
69539 + u16 acl_role_id;
69540 + /* is this the task that authenticated to the special role */
69541 + u8 acl_sp_role;
69542 + u8 is_writable;
69543 + u8 brute;
69544 + u8 gr_is_chrooted;
69545 +#endif
69546 +
69547 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69548 /* Index of current stored adress in ret_stack */
69549 int curr_ret_stack;
69550 @@ -1542,6 +1588,57 @@ struct task_struct {
69551 #endif /* CONFIG_TRACING */
69552 };
69553
69554 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69555 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69556 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69557 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69558 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69559 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69560 +
69561 +#ifdef CONFIG_PAX_SOFTMODE
69562 +extern int pax_softmode;
69563 +#endif
69564 +
69565 +extern int pax_check_flags(unsigned long *);
69566 +
69567 +/* if tsk != current then task_lock must be held on it */
69568 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69569 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
69570 +{
69571 + if (likely(tsk->mm))
69572 + return tsk->mm->pax_flags;
69573 + else
69574 + return 0UL;
69575 +}
69576 +
69577 +/* if tsk != current then task_lock must be held on it */
69578 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69579 +{
69580 + if (likely(tsk->mm)) {
69581 + tsk->mm->pax_flags = flags;
69582 + return 0;
69583 + }
69584 + return -EINVAL;
69585 +}
69586 +#endif
69587 +
69588 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69589 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
69590 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69591 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69592 +#endif
69593 +
69594 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69595 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69596 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
69597 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69598 +
69599 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69600 +extern void pax_track_stack(void);
69601 +#else
69602 +static inline void pax_track_stack(void) {}
69603 +#endif
69604 +
69605 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69606 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69607
69608 @@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69609 #define PF_DUMPCORE 0x00000200 /* dumped core */
69610 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69611 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69612 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69613 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69614 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69615 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69616 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69617 @@ -1978,7 +2075,9 @@ void yield(void);
69618 extern struct exec_domain default_exec_domain;
69619
69620 union thread_union {
69621 +#ifndef CONFIG_X86
69622 struct thread_info thread_info;
69623 +#endif
69624 unsigned long stack[THREAD_SIZE/sizeof(long)];
69625 };
69626
69627 @@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69628 */
69629
69630 extern struct task_struct *find_task_by_vpid(pid_t nr);
69631 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69632 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69633 struct pid_namespace *ns);
69634
69635 @@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69636 extern void exit_itimers(struct signal_struct *);
69637 extern void flush_itimer_signals(void);
69638
69639 -extern NORET_TYPE void do_group_exit(int);
69640 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69641
69642 extern void daemonize(const char *, ...);
69643 extern int allow_signal(int);
69644 @@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69645
69646 #endif
69647
69648 -static inline int object_is_on_stack(void *obj)
69649 +static inline int object_starts_on_stack(void *obj)
69650 {
69651 - void *stack = task_stack_page(current);
69652 + const void *stack = task_stack_page(current);
69653
69654 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69655 }
69656
69657 +#ifdef CONFIG_PAX_USERCOPY
69658 +extern int object_is_on_stack(const void *obj, unsigned long len);
69659 +#endif
69660 +
69661 extern void thread_info_cache_init(void);
69662
69663 #ifdef CONFIG_DEBUG_STACK_USAGE
69664 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69665 index 1ee2c05..81b7ec4 100644
69666 --- a/include/linux/screen_info.h
69667 +++ b/include/linux/screen_info.h
69668 @@ -42,7 +42,8 @@ struct screen_info {
69669 __u16 pages; /* 0x32 */
69670 __u16 vesa_attributes; /* 0x34 */
69671 __u32 capabilities; /* 0x36 */
69672 - __u8 _reserved[6]; /* 0x3a */
69673 + __u16 vesapm_size; /* 0x3a */
69674 + __u8 _reserved[4]; /* 0x3c */
69675 } __attribute__((packed));
69676
69677 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69678 diff --git a/include/linux/security.h b/include/linux/security.h
69679 index d40d23f..d739b08 100644
69680 --- a/include/linux/security.h
69681 +++ b/include/linux/security.h
69682 @@ -34,6 +34,7 @@
69683 #include <linux/key.h>
69684 #include <linux/xfrm.h>
69685 #include <linux/gfp.h>
69686 +#include <linux/grsecurity.h>
69687 #include <net/flow.h>
69688
69689 /* Maximum number of letters for an LSM name string */
69690 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69691 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69692 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69693 extern int cap_task_setnice(struct task_struct *p, int nice);
69694 -extern int cap_syslog(int type);
69695 +extern int cap_syslog(int type, bool from_file);
69696 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69697
69698 struct msghdr;
69699 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69700 * logging to the console.
69701 * See the syslog(2) manual page for an explanation of the @type values.
69702 * @type contains the type of action.
69703 + * @from_file indicates the context of action (if it came from /proc).
69704 * Return 0 if permission is granted.
69705 * @settime:
69706 * Check permission to change the system time.
69707 @@ -1445,7 +1447,7 @@ struct security_operations {
69708 int (*sysctl) (struct ctl_table *table, int op);
69709 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69710 int (*quota_on) (struct dentry *dentry);
69711 - int (*syslog) (int type);
69712 + int (*syslog) (int type, bool from_file);
69713 int (*settime) (struct timespec *ts, struct timezone *tz);
69714 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69715
69716 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69717 int security_sysctl(struct ctl_table *table, int op);
69718 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69719 int security_quota_on(struct dentry *dentry);
69720 -int security_syslog(int type);
69721 +int security_syslog(int type, bool from_file);
69722 int security_settime(struct timespec *ts, struct timezone *tz);
69723 int security_vm_enough_memory(long pages);
69724 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69725 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69726 return 0;
69727 }
69728
69729 -static inline int security_syslog(int type)
69730 +static inline int security_syslog(int type, bool from_file)
69731 {
69732 - return cap_syslog(type);
69733 + return cap_syslog(type, from_file);
69734 }
69735
69736 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69737 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69738 index 8366d8f..cc5f9d6 100644
69739 --- a/include/linux/seq_file.h
69740 +++ b/include/linux/seq_file.h
69741 @@ -23,6 +23,9 @@ struct seq_file {
69742 u64 version;
69743 struct mutex lock;
69744 const struct seq_operations *op;
69745 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69746 + u64 exec_id;
69747 +#endif
69748 void *private;
69749 };
69750
69751 @@ -32,6 +35,7 @@ struct seq_operations {
69752 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69753 int (*show) (struct seq_file *m, void *v);
69754 };
69755 +typedef struct seq_operations __no_const seq_operations_no_const;
69756
69757 #define SEQ_SKIP 1
69758
69759 diff --git a/include/linux/shm.h b/include/linux/shm.h
69760 index eca6235..c7417ed 100644
69761 --- a/include/linux/shm.h
69762 +++ b/include/linux/shm.h
69763 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69764 pid_t shm_cprid;
69765 pid_t shm_lprid;
69766 struct user_struct *mlock_user;
69767 +#ifdef CONFIG_GRKERNSEC
69768 + time_t shm_createtime;
69769 + pid_t shm_lapid;
69770 +#endif
69771 };
69772
69773 /* shm_mode upper byte flags */
69774 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69775 index bcdd660..6e12e11 100644
69776 --- a/include/linux/skbuff.h
69777 +++ b/include/linux/skbuff.h
69778 @@ -14,6 +14,7 @@
69779 #ifndef _LINUX_SKBUFF_H
69780 #define _LINUX_SKBUFF_H
69781
69782 +#include <linux/const.h>
69783 #include <linux/kernel.h>
69784 #include <linux/kmemcheck.h>
69785 #include <linux/compiler.h>
69786 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69787 */
69788 static inline int skb_queue_empty(const struct sk_buff_head *list)
69789 {
69790 - return list->next == (struct sk_buff *)list;
69791 + return list->next == (const struct sk_buff *)list;
69792 }
69793
69794 /**
69795 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69796 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69797 const struct sk_buff *skb)
69798 {
69799 - return (skb->next == (struct sk_buff *) list);
69800 + return (skb->next == (const struct sk_buff *) list);
69801 }
69802
69803 /**
69804 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69805 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69806 const struct sk_buff *skb)
69807 {
69808 - return (skb->prev == (struct sk_buff *) list);
69809 + return (skb->prev == (const struct sk_buff *) list);
69810 }
69811
69812 /**
69813 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69814 * headroom, you should not reduce this.
69815 */
69816 #ifndef NET_SKB_PAD
69817 -#define NET_SKB_PAD 32
69818 +#define NET_SKB_PAD (_AC(32,UL))
69819 #endif
69820
69821 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69822 diff --git a/include/linux/slab.h b/include/linux/slab.h
69823 index 2da8372..a3be824 100644
69824 --- a/include/linux/slab.h
69825 +++ b/include/linux/slab.h
69826 @@ -11,12 +11,20 @@
69827
69828 #include <linux/gfp.h>
69829 #include <linux/types.h>
69830 +#include <linux/err.h>
69831
69832 /*
69833 * Flags to pass to kmem_cache_create().
69834 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69835 */
69836 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69837 +
69838 +#ifdef CONFIG_PAX_USERCOPY
69839 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69840 +#else
69841 +#define SLAB_USERCOPY 0x00000000UL
69842 +#endif
69843 +
69844 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69845 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69846 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69847 @@ -82,10 +90,13 @@
69848 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69849 * Both make kfree a no-op.
69850 */
69851 -#define ZERO_SIZE_PTR ((void *)16)
69852 +#define ZERO_SIZE_PTR \
69853 +({ \
69854 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69855 + (void *)(-MAX_ERRNO-1L); \
69856 +})
69857
69858 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69859 - (unsigned long)ZERO_SIZE_PTR)
69860 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69861
69862 /*
69863 * struct kmem_cache related prototypes
69864 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69865 void kfree(const void *);
69866 void kzfree(const void *);
69867 size_t ksize(const void *);
69868 +void check_object_size(const void *ptr, unsigned long n, bool to);
69869
69870 /*
69871 * Allocator specific definitions. These are mainly used to establish optimized
69872 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69873
69874 void __init kmem_cache_init_late(void);
69875
69876 +#define kmalloc(x, y) \
69877 +({ \
69878 + void *___retval; \
69879 + intoverflow_t ___x = (intoverflow_t)x; \
69880 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69881 + ___retval = NULL; \
69882 + else \
69883 + ___retval = kmalloc((size_t)___x, (y)); \
69884 + ___retval; \
69885 +})
69886 +
69887 +#define kmalloc_node(x, y, z) \
69888 +({ \
69889 + void *___retval; \
69890 + intoverflow_t ___x = (intoverflow_t)x; \
69891 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69892 + ___retval = NULL; \
69893 + else \
69894 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
69895 + ___retval; \
69896 +})
69897 +
69898 +#define kzalloc(x, y) \
69899 +({ \
69900 + void *___retval; \
69901 + intoverflow_t ___x = (intoverflow_t)x; \
69902 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69903 + ___retval = NULL; \
69904 + else \
69905 + ___retval = kzalloc((size_t)___x, (y)); \
69906 + ___retval; \
69907 +})
69908 +
69909 #endif /* _LINUX_SLAB_H */
69910 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69911 index 850d057..d9dfe3c 100644
69912 --- a/include/linux/slab_def.h
69913 +++ b/include/linux/slab_def.h
69914 @@ -69,10 +69,10 @@ struct kmem_cache {
69915 unsigned long node_allocs;
69916 unsigned long node_frees;
69917 unsigned long node_overflow;
69918 - atomic_t allochit;
69919 - atomic_t allocmiss;
69920 - atomic_t freehit;
69921 - atomic_t freemiss;
69922 + atomic_unchecked_t allochit;
69923 + atomic_unchecked_t allocmiss;
69924 + atomic_unchecked_t freehit;
69925 + atomic_unchecked_t freemiss;
69926
69927 /*
69928 * If debugging is enabled, then the allocator can add additional
69929 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69930 index 5ad70a6..57f9f65 100644
69931 --- a/include/linux/slub_def.h
69932 +++ b/include/linux/slub_def.h
69933 @@ -86,7 +86,7 @@ struct kmem_cache {
69934 struct kmem_cache_order_objects max;
69935 struct kmem_cache_order_objects min;
69936 gfp_t allocflags; /* gfp flags to use on each alloc */
69937 - int refcount; /* Refcount for slab cache destroy */
69938 + atomic_t refcount; /* Refcount for slab cache destroy */
69939 void (*ctor)(void *);
69940 int inuse; /* Offset to metadata */
69941 int align; /* Alignment */
69942 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69943 #endif
69944
69945 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69946 -void *__kmalloc(size_t size, gfp_t flags);
69947 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69948
69949 #ifdef CONFIG_KMEMTRACE
69950 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69951 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69952 index 67ad11f..0bbd8af 100644
69953 --- a/include/linux/sonet.h
69954 +++ b/include/linux/sonet.h
69955 @@ -61,7 +61,7 @@ struct sonet_stats {
69956 #include <asm/atomic.h>
69957
69958 struct k_sonet_stats {
69959 -#define __HANDLE_ITEM(i) atomic_t i
69960 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
69961 __SONET_ITEMS
69962 #undef __HANDLE_ITEM
69963 };
69964 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69965 index 6f52b4d..5500323 100644
69966 --- a/include/linux/sunrpc/cache.h
69967 +++ b/include/linux/sunrpc/cache.h
69968 @@ -125,7 +125,7 @@ struct cache_detail {
69969 */
69970 struct cache_req {
69971 struct cache_deferred_req *(*defer)(struct cache_req *req);
69972 -};
69973 +} __no_const;
69974 /* this must be embedded in a deferred_request that is being
69975 * delayed awaiting cache-fill
69976 */
69977 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69978 index 8ed9642..101ceab 100644
69979 --- a/include/linux/sunrpc/clnt.h
69980 +++ b/include/linux/sunrpc/clnt.h
69981 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69982 {
69983 switch (sap->sa_family) {
69984 case AF_INET:
69985 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
69986 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69987 case AF_INET6:
69988 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69989 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69990 }
69991 return 0;
69992 }
69993 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69994 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69995 const struct sockaddr *src)
69996 {
69997 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69998 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69999 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
70000
70001 dsin->sin_family = ssin->sin_family;
70002 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
70003 if (sa->sa_family != AF_INET6)
70004 return 0;
70005
70006 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
70007 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
70008 }
70009
70010 #endif /* __KERNEL__ */
70011 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
70012 index c14fe86..393245e 100644
70013 --- a/include/linux/sunrpc/svc_rdma.h
70014 +++ b/include/linux/sunrpc/svc_rdma.h
70015 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
70016 extern unsigned int svcrdma_max_requests;
70017 extern unsigned int svcrdma_max_req_size;
70018
70019 -extern atomic_t rdma_stat_recv;
70020 -extern atomic_t rdma_stat_read;
70021 -extern atomic_t rdma_stat_write;
70022 -extern atomic_t rdma_stat_sq_starve;
70023 -extern atomic_t rdma_stat_rq_starve;
70024 -extern atomic_t rdma_stat_rq_poll;
70025 -extern atomic_t rdma_stat_rq_prod;
70026 -extern atomic_t rdma_stat_sq_poll;
70027 -extern atomic_t rdma_stat_sq_prod;
70028 +extern atomic_unchecked_t rdma_stat_recv;
70029 +extern atomic_unchecked_t rdma_stat_read;
70030 +extern atomic_unchecked_t rdma_stat_write;
70031 +extern atomic_unchecked_t rdma_stat_sq_starve;
70032 +extern atomic_unchecked_t rdma_stat_rq_starve;
70033 +extern atomic_unchecked_t rdma_stat_rq_poll;
70034 +extern atomic_unchecked_t rdma_stat_rq_prod;
70035 +extern atomic_unchecked_t rdma_stat_sq_poll;
70036 +extern atomic_unchecked_t rdma_stat_sq_prod;
70037
70038 #define RPCRDMA_VERSION 1
70039
70040 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
70041 index 5e781d8..1e62818 100644
70042 --- a/include/linux/suspend.h
70043 +++ b/include/linux/suspend.h
70044 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
70045 * which require special recovery actions in that situation.
70046 */
70047 struct platform_suspend_ops {
70048 - int (*valid)(suspend_state_t state);
70049 - int (*begin)(suspend_state_t state);
70050 - int (*prepare)(void);
70051 - int (*prepare_late)(void);
70052 - int (*enter)(suspend_state_t state);
70053 - void (*wake)(void);
70054 - void (*finish)(void);
70055 - void (*end)(void);
70056 - void (*recover)(void);
70057 + int (* const valid)(suspend_state_t state);
70058 + int (* const begin)(suspend_state_t state);
70059 + int (* const prepare)(void);
70060 + int (* const prepare_late)(void);
70061 + int (* const enter)(suspend_state_t state);
70062 + void (* const wake)(void);
70063 + void (* const finish)(void);
70064 + void (* const end)(void);
70065 + void (* const recover)(void);
70066 };
70067
70068 #ifdef CONFIG_SUSPEND
70069 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
70070 * suspend_set_ops - set platform dependent suspend operations
70071 * @ops: The new suspend operations to set.
70072 */
70073 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
70074 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
70075 extern int suspend_valid_only_mem(suspend_state_t state);
70076
70077 /**
70078 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
70079 #else /* !CONFIG_SUSPEND */
70080 #define suspend_valid_only_mem NULL
70081
70082 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
70083 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
70084 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
70085 #endif /* !CONFIG_SUSPEND */
70086
70087 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
70088 * platforms which require special recovery actions in that situation.
70089 */
70090 struct platform_hibernation_ops {
70091 - int (*begin)(void);
70092 - void (*end)(void);
70093 - int (*pre_snapshot)(void);
70094 - void (*finish)(void);
70095 - int (*prepare)(void);
70096 - int (*enter)(void);
70097 - void (*leave)(void);
70098 - int (*pre_restore)(void);
70099 - void (*restore_cleanup)(void);
70100 - void (*recover)(void);
70101 + int (* const begin)(void);
70102 + void (* const end)(void);
70103 + int (* const pre_snapshot)(void);
70104 + void (* const finish)(void);
70105 + int (* const prepare)(void);
70106 + int (* const enter)(void);
70107 + void (* const leave)(void);
70108 + int (* const pre_restore)(void);
70109 + void (* const restore_cleanup)(void);
70110 + void (* const recover)(void);
70111 };
70112
70113 #ifdef CONFIG_HIBERNATION
70114 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
70115 extern void swsusp_unset_page_free(struct page *);
70116 extern unsigned long get_safe_page(gfp_t gfp_mask);
70117
70118 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
70119 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
70120 extern int hibernate(void);
70121 extern bool system_entering_hibernation(void);
70122 #else /* CONFIG_HIBERNATION */
70123 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
70124 static inline void swsusp_set_page_free(struct page *p) {}
70125 static inline void swsusp_unset_page_free(struct page *p) {}
70126
70127 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
70128 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
70129 static inline int hibernate(void) { return -ENOSYS; }
70130 static inline bool system_entering_hibernation(void) { return false; }
70131 #endif /* CONFIG_HIBERNATION */
70132 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
70133 index 0eb6942..a805cb6 100644
70134 --- a/include/linux/sysctl.h
70135 +++ b/include/linux/sysctl.h
70136 @@ -164,7 +164,11 @@ enum
70137 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
70138 };
70139
70140 -
70141 +#ifdef CONFIG_PAX_SOFTMODE
70142 +enum {
70143 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
70144 +};
70145 +#endif
70146
70147 /* CTL_VM names: */
70148 enum
70149 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
70150
70151 extern int proc_dostring(struct ctl_table *, int,
70152 void __user *, size_t *, loff_t *);
70153 +extern int proc_dostring_modpriv(struct ctl_table *, int,
70154 + void __user *, size_t *, loff_t *);
70155 extern int proc_dointvec(struct ctl_table *, int,
70156 void __user *, size_t *, loff_t *);
70157 extern int proc_dointvec_minmax(struct ctl_table *, int,
70158 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
70159
70160 extern ctl_handler sysctl_data;
70161 extern ctl_handler sysctl_string;
70162 +extern ctl_handler sysctl_string_modpriv;
70163 extern ctl_handler sysctl_intvec;
70164 extern ctl_handler sysctl_jiffies;
70165 extern ctl_handler sysctl_ms_jiffies;
70166 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
70167 index 9d68fed..71f02cc 100644
70168 --- a/include/linux/sysfs.h
70169 +++ b/include/linux/sysfs.h
70170 @@ -75,8 +75,8 @@ struct bin_attribute {
70171 };
70172
70173 struct sysfs_ops {
70174 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
70175 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
70176 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
70177 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
70178 };
70179
70180 struct sysfs_dirent;
70181 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
70182 new file mode 100644
70183 index 0000000..3891139
70184 --- /dev/null
70185 +++ b/include/linux/syslog.h
70186 @@ -0,0 +1,52 @@
70187 +/* Syslog internals
70188 + *
70189 + * Copyright 2010 Canonical, Ltd.
70190 + * Author: Kees Cook <kees.cook@canonical.com>
70191 + *
70192 + * This program is free software; you can redistribute it and/or modify
70193 + * it under the terms of the GNU General Public License as published by
70194 + * the Free Software Foundation; either version 2, or (at your option)
70195 + * any later version.
70196 + *
70197 + * This program is distributed in the hope that it will be useful,
70198 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
70199 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70200 + * GNU General Public License for more details.
70201 + *
70202 + * You should have received a copy of the GNU General Public License
70203 + * along with this program; see the file COPYING. If not, write to
70204 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
70205 + */
70206 +
70207 +#ifndef _LINUX_SYSLOG_H
70208 +#define _LINUX_SYSLOG_H
70209 +
70210 +/* Close the log. Currently a NOP. */
70211 +#define SYSLOG_ACTION_CLOSE 0
70212 +/* Open the log. Currently a NOP. */
70213 +#define SYSLOG_ACTION_OPEN 1
70214 +/* Read from the log. */
70215 +#define SYSLOG_ACTION_READ 2
70216 +/* Read all messages remaining in the ring buffer. */
70217 +#define SYSLOG_ACTION_READ_ALL 3
70218 +/* Read and clear all messages remaining in the ring buffer */
70219 +#define SYSLOG_ACTION_READ_CLEAR 4
70220 +/* Clear ring buffer. */
70221 +#define SYSLOG_ACTION_CLEAR 5
70222 +/* Disable printk's to console */
70223 +#define SYSLOG_ACTION_CONSOLE_OFF 6
70224 +/* Enable printk's to console */
70225 +#define SYSLOG_ACTION_CONSOLE_ON 7
70226 +/* Set level of messages printed to console */
70227 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
70228 +/* Return number of unread characters in the log buffer */
70229 +#define SYSLOG_ACTION_SIZE_UNREAD 9
70230 +/* Return size of the log buffer */
70231 +#define SYSLOG_ACTION_SIZE_BUFFER 10
70232 +
70233 +#define SYSLOG_FROM_CALL 0
70234 +#define SYSLOG_FROM_FILE 1
70235 +
70236 +int do_syslog(int type, char __user *buf, int count, bool from_file);
70237 +
70238 +#endif /* _LINUX_SYSLOG_H */
70239 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70240 index a8cc4e1..98d3b85 100644
70241 --- a/include/linux/thread_info.h
70242 +++ b/include/linux/thread_info.h
70243 @@ -23,7 +23,7 @@ struct restart_block {
70244 };
70245 /* For futex_wait and futex_wait_requeue_pi */
70246 struct {
70247 - u32 *uaddr;
70248 + u32 __user *uaddr;
70249 u32 val;
70250 u32 flags;
70251 u32 bitset;
70252 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
70253 index 1eb44a9..f582df3 100644
70254 --- a/include/linux/tracehook.h
70255 +++ b/include/linux/tracehook.h
70256 @@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
70257 /*
70258 * ptrace report for syscall entry and exit looks identical.
70259 */
70260 -static inline void ptrace_report_syscall(struct pt_regs *regs)
70261 +static inline int ptrace_report_syscall(struct pt_regs *regs)
70262 {
70263 int ptrace = task_ptrace(current);
70264
70265 if (!(ptrace & PT_PTRACED))
70266 - return;
70267 + return 0;
70268
70269 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70270
70271 @@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70272 send_sig(current->exit_code, current, 1);
70273 current->exit_code = 0;
70274 }
70275 +
70276 + return fatal_signal_pending(current);
70277 }
70278
70279 /**
70280 @@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70281 static inline __must_check int tracehook_report_syscall_entry(
70282 struct pt_regs *regs)
70283 {
70284 - ptrace_report_syscall(regs);
70285 - return 0;
70286 + return ptrace_report_syscall(regs);
70287 }
70288
70289 /**
70290 diff --git a/include/linux/tty.h b/include/linux/tty.h
70291 index e9c57e9..ee6d489 100644
70292 --- a/include/linux/tty.h
70293 +++ b/include/linux/tty.h
70294 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70295 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70296 extern void tty_ldisc_enable(struct tty_struct *tty);
70297
70298 -
70299 /* n_tty.c */
70300 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70301
70302 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70303 index 0c4ee9b..9f7c426 100644
70304 --- a/include/linux/tty_ldisc.h
70305 +++ b/include/linux/tty_ldisc.h
70306 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70307
70308 struct module *owner;
70309
70310 - int refcount;
70311 + atomic_t refcount;
70312 };
70313
70314 struct tty_ldisc {
70315 diff --git a/include/linux/types.h b/include/linux/types.h
70316 index c42724f..d190eee 100644
70317 --- a/include/linux/types.h
70318 +++ b/include/linux/types.h
70319 @@ -191,10 +191,26 @@ typedef struct {
70320 volatile int counter;
70321 } atomic_t;
70322
70323 +#ifdef CONFIG_PAX_REFCOUNT
70324 +typedef struct {
70325 + volatile int counter;
70326 +} atomic_unchecked_t;
70327 +#else
70328 +typedef atomic_t atomic_unchecked_t;
70329 +#endif
70330 +
70331 #ifdef CONFIG_64BIT
70332 typedef struct {
70333 volatile long counter;
70334 } atomic64_t;
70335 +
70336 +#ifdef CONFIG_PAX_REFCOUNT
70337 +typedef struct {
70338 + volatile long counter;
70339 +} atomic64_unchecked_t;
70340 +#else
70341 +typedef atomic64_t atomic64_unchecked_t;
70342 +#endif
70343 #endif
70344
70345 struct ustat {
70346 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70347 index 6b58367..53a3e8e 100644
70348 --- a/include/linux/uaccess.h
70349 +++ b/include/linux/uaccess.h
70350 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70351 long ret; \
70352 mm_segment_t old_fs = get_fs(); \
70353 \
70354 - set_fs(KERNEL_DS); \
70355 pagefault_disable(); \
70356 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70357 - pagefault_enable(); \
70358 + set_fs(KERNEL_DS); \
70359 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70360 set_fs(old_fs); \
70361 + pagefault_enable(); \
70362 ret; \
70363 })
70364
70365 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70366 * Safely read from address @src to the buffer at @dst. If a kernel fault
70367 * happens, handle that and return -EFAULT.
70368 */
70369 -extern long probe_kernel_read(void *dst, void *src, size_t size);
70370 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
70371
70372 /*
70373 * probe_kernel_write(): safely attempt to write to a location
70374 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70375 * Safely write to address @dst from the buffer at @src. If a kernel fault
70376 * happens, handle that and return -EFAULT.
70377 */
70378 -extern long probe_kernel_write(void *dst, void *src, size_t size);
70379 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
70380
70381 #endif /* __LINUX_UACCESS_H__ */
70382 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70383 index 99c1b4d..bb94261 100644
70384 --- a/include/linux/unaligned/access_ok.h
70385 +++ b/include/linux/unaligned/access_ok.h
70386 @@ -6,32 +6,32 @@
70387
70388 static inline u16 get_unaligned_le16(const void *p)
70389 {
70390 - return le16_to_cpup((__le16 *)p);
70391 + return le16_to_cpup((const __le16 *)p);
70392 }
70393
70394 static inline u32 get_unaligned_le32(const void *p)
70395 {
70396 - return le32_to_cpup((__le32 *)p);
70397 + return le32_to_cpup((const __le32 *)p);
70398 }
70399
70400 static inline u64 get_unaligned_le64(const void *p)
70401 {
70402 - return le64_to_cpup((__le64 *)p);
70403 + return le64_to_cpup((const __le64 *)p);
70404 }
70405
70406 static inline u16 get_unaligned_be16(const void *p)
70407 {
70408 - return be16_to_cpup((__be16 *)p);
70409 + return be16_to_cpup((const __be16 *)p);
70410 }
70411
70412 static inline u32 get_unaligned_be32(const void *p)
70413 {
70414 - return be32_to_cpup((__be32 *)p);
70415 + return be32_to_cpup((const __be32 *)p);
70416 }
70417
70418 static inline u64 get_unaligned_be64(const void *p)
70419 {
70420 - return be64_to_cpup((__be64 *)p);
70421 + return be64_to_cpup((const __be64 *)p);
70422 }
70423
70424 static inline void put_unaligned_le16(u16 val, void *p)
70425 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70426 index 79b9837..b5a56f9 100644
70427 --- a/include/linux/vermagic.h
70428 +++ b/include/linux/vermagic.h
70429 @@ -26,9 +26,35 @@
70430 #define MODULE_ARCH_VERMAGIC ""
70431 #endif
70432
70433 +#ifdef CONFIG_PAX_REFCOUNT
70434 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
70435 +#else
70436 +#define MODULE_PAX_REFCOUNT ""
70437 +#endif
70438 +
70439 +#ifdef CONSTIFY_PLUGIN
70440 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70441 +#else
70442 +#define MODULE_CONSTIFY_PLUGIN ""
70443 +#endif
70444 +
70445 +#ifdef STACKLEAK_PLUGIN
70446 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70447 +#else
70448 +#define MODULE_STACKLEAK_PLUGIN ""
70449 +#endif
70450 +
70451 +#ifdef CONFIG_GRKERNSEC
70452 +#define MODULE_GRSEC "GRSEC "
70453 +#else
70454 +#define MODULE_GRSEC ""
70455 +#endif
70456 +
70457 #define VERMAGIC_STRING \
70458 UTS_RELEASE " " \
70459 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70460 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70461 - MODULE_ARCH_VERMAGIC
70462 + MODULE_ARCH_VERMAGIC \
70463 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70464 + MODULE_GRSEC
70465
70466 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70467 index 819a634..462ac12 100644
70468 --- a/include/linux/vmalloc.h
70469 +++ b/include/linux/vmalloc.h
70470 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70471 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70472 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70473 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70474 +
70475 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70476 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70477 +#endif
70478 +
70479 /* bits [20..32] reserved for arch specific ioremap internals */
70480
70481 /*
70482 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
70483
70484 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
70485
70486 +#define vmalloc(x) \
70487 +({ \
70488 + void *___retval; \
70489 + intoverflow_t ___x = (intoverflow_t)x; \
70490 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
70491 + ___retval = NULL; \
70492 + else \
70493 + ___retval = vmalloc((unsigned long)___x); \
70494 + ___retval; \
70495 +})
70496 +
70497 +#define __vmalloc(x, y, z) \
70498 +({ \
70499 + void *___retval; \
70500 + intoverflow_t ___x = (intoverflow_t)x; \
70501 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
70502 + ___retval = NULL; \
70503 + else \
70504 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
70505 + ___retval; \
70506 +})
70507 +
70508 +#define vmalloc_user(x) \
70509 +({ \
70510 + void *___retval; \
70511 + intoverflow_t ___x = (intoverflow_t)x; \
70512 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
70513 + ___retval = NULL; \
70514 + else \
70515 + ___retval = vmalloc_user((unsigned long)___x); \
70516 + ___retval; \
70517 +})
70518 +
70519 +#define vmalloc_exec(x) \
70520 +({ \
70521 + void *___retval; \
70522 + intoverflow_t ___x = (intoverflow_t)x; \
70523 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
70524 + ___retval = NULL; \
70525 + else \
70526 + ___retval = vmalloc_exec((unsigned long)___x); \
70527 + ___retval; \
70528 +})
70529 +
70530 +#define vmalloc_node(x, y) \
70531 +({ \
70532 + void *___retval; \
70533 + intoverflow_t ___x = (intoverflow_t)x; \
70534 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
70535 + ___retval = NULL; \
70536 + else \
70537 + ___retval = vmalloc_node((unsigned long)___x, (y));\
70538 + ___retval; \
70539 +})
70540 +
70541 +#define vmalloc_32(x) \
70542 +({ \
70543 + void *___retval; \
70544 + intoverflow_t ___x = (intoverflow_t)x; \
70545 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
70546 + ___retval = NULL; \
70547 + else \
70548 + ___retval = vmalloc_32((unsigned long)___x); \
70549 + ___retval; \
70550 +})
70551 +
70552 +#define vmalloc_32_user(x) \
70553 +({ \
70554 + void *___retval; \
70555 + intoverflow_t ___x = (intoverflow_t)x; \
70556 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
70557 + ___retval = NULL; \
70558 + else \
70559 + ___retval = vmalloc_32_user((unsigned long)___x);\
70560 + ___retval; \
70561 +})
70562 +
70563 #endif /* _LINUX_VMALLOC_H */
70564 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70565 index 13070d6..aa4159a 100644
70566 --- a/include/linux/vmstat.h
70567 +++ b/include/linux/vmstat.h
70568 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
70569 /*
70570 * Zone based page accounting with per cpu differentials.
70571 */
70572 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70573 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70574
70575 static inline void zone_page_state_add(long x, struct zone *zone,
70576 enum zone_stat_item item)
70577 {
70578 - atomic_long_add(x, &zone->vm_stat[item]);
70579 - atomic_long_add(x, &vm_stat[item]);
70580 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70581 + atomic_long_add_unchecked(x, &vm_stat[item]);
70582 }
70583
70584 static inline unsigned long global_page_state(enum zone_stat_item item)
70585 {
70586 - long x = atomic_long_read(&vm_stat[item]);
70587 + long x = atomic_long_read_unchecked(&vm_stat[item]);
70588 #ifdef CONFIG_SMP
70589 if (x < 0)
70590 x = 0;
70591 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70592 static inline unsigned long zone_page_state(struct zone *zone,
70593 enum zone_stat_item item)
70594 {
70595 - long x = atomic_long_read(&zone->vm_stat[item]);
70596 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70597 #ifdef CONFIG_SMP
70598 if (x < 0)
70599 x = 0;
70600 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70601 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70602 enum zone_stat_item item)
70603 {
70604 - long x = atomic_long_read(&zone->vm_stat[item]);
70605 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70606
70607 #ifdef CONFIG_SMP
70608 int cpu;
70609 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70610
70611 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70612 {
70613 - atomic_long_inc(&zone->vm_stat[item]);
70614 - atomic_long_inc(&vm_stat[item]);
70615 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
70616 + atomic_long_inc_unchecked(&vm_stat[item]);
70617 }
70618
70619 static inline void __inc_zone_page_state(struct page *page,
70620 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70621
70622 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70623 {
70624 - atomic_long_dec(&zone->vm_stat[item]);
70625 - atomic_long_dec(&vm_stat[item]);
70626 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
70627 + atomic_long_dec_unchecked(&vm_stat[item]);
70628 }
70629
70630 static inline void __dec_zone_page_state(struct page *page,
70631 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70632 index 5c84af8..1a3b6e2 100644
70633 --- a/include/linux/xattr.h
70634 +++ b/include/linux/xattr.h
70635 @@ -33,6 +33,11 @@
70636 #define XATTR_USER_PREFIX "user."
70637 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70638
70639 +/* User namespace */
70640 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70641 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
70642 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70643 +
70644 struct inode;
70645 struct dentry;
70646
70647 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70648 index eed5fcc..5080d24 100644
70649 --- a/include/media/saa7146_vv.h
70650 +++ b/include/media/saa7146_vv.h
70651 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
70652 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70653
70654 /* the extension can override this */
70655 - struct v4l2_ioctl_ops ops;
70656 + v4l2_ioctl_ops_no_const ops;
70657 /* pointer to the saa7146 core ops */
70658 const struct v4l2_ioctl_ops *core_ops;
70659
70660 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70661 index 73c9867..2da8837 100644
70662 --- a/include/media/v4l2-dev.h
70663 +++ b/include/media/v4l2-dev.h
70664 @@ -34,7 +34,7 @@ struct v4l2_device;
70665 #define V4L2_FL_UNREGISTERED (0)
70666
70667 struct v4l2_file_operations {
70668 - struct module *owner;
70669 + struct module * const owner;
70670 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70671 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70672 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70673 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
70674 int (*open) (struct file *);
70675 int (*release) (struct file *);
70676 };
70677 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70678
70679 /*
70680 * Newer version of video_device, handled by videodev2.c
70681 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70682 index 5d5d550..f559ef1 100644
70683 --- a/include/media/v4l2-device.h
70684 +++ b/include/media/v4l2-device.h
70685 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70686 this function returns 0. If the name ends with a digit (e.g. cx18),
70687 then the name will be set to cx18-0 since cx180 looks really odd. */
70688 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70689 - atomic_t *instance);
70690 + atomic_unchecked_t *instance);
70691
70692 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70693 Since the parent disappears this ensures that v4l2_dev doesn't have an
70694 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70695 index 7a4529d..7244290 100644
70696 --- a/include/media/v4l2-ioctl.h
70697 +++ b/include/media/v4l2-ioctl.h
70698 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70699 long (*vidioc_default) (struct file *file, void *fh,
70700 int cmd, void *arg);
70701 };
70702 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70703
70704
70705 /* v4l debugging and diagnostics */
70706 diff --git a/include/net/flow.h b/include/net/flow.h
70707 index 809970b..c3df4f3 100644
70708 --- a/include/net/flow.h
70709 +++ b/include/net/flow.h
70710 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70711 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70712 u8 dir, flow_resolve_t resolver);
70713 extern void flow_cache_flush(void);
70714 -extern atomic_t flow_cache_genid;
70715 +extern atomic_unchecked_t flow_cache_genid;
70716
70717 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70718 {
70719 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70720 index 15e1f8fe..668837c 100644
70721 --- a/include/net/inetpeer.h
70722 +++ b/include/net/inetpeer.h
70723 @@ -24,7 +24,7 @@ struct inet_peer
70724 __u32 dtime; /* the time of last use of not
70725 * referenced entries */
70726 atomic_t refcnt;
70727 - atomic_t rid; /* Frag reception counter */
70728 + atomic_unchecked_t rid; /* Frag reception counter */
70729 __u32 tcp_ts;
70730 unsigned long tcp_ts_stamp;
70731 };
70732 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70733 index 98978e7..2243a3d 100644
70734 --- a/include/net/ip_vs.h
70735 +++ b/include/net/ip_vs.h
70736 @@ -365,7 +365,7 @@ struct ip_vs_conn {
70737 struct ip_vs_conn *control; /* Master control connection */
70738 atomic_t n_control; /* Number of controlled ones */
70739 struct ip_vs_dest *dest; /* real server */
70740 - atomic_t in_pkts; /* incoming packet counter */
70741 + atomic_unchecked_t in_pkts; /* incoming packet counter */
70742
70743 /* packet transmitter for different forwarding methods. If it
70744 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70745 @@ -466,7 +466,7 @@ struct ip_vs_dest {
70746 union nf_inet_addr addr; /* IP address of the server */
70747 __be16 port; /* port number of the server */
70748 volatile unsigned flags; /* dest status flags */
70749 - atomic_t conn_flags; /* flags to copy to conn */
70750 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
70751 atomic_t weight; /* server weight */
70752
70753 atomic_t refcnt; /* reference counter */
70754 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70755 index 69b610a..fe3962c 100644
70756 --- a/include/net/irda/ircomm_core.h
70757 +++ b/include/net/irda/ircomm_core.h
70758 @@ -51,7 +51,7 @@ typedef struct {
70759 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70760 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70761 struct ircomm_info *);
70762 -} call_t;
70763 +} __no_const call_t;
70764
70765 struct ircomm_cb {
70766 irda_queue_t queue;
70767 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70768 index eea2e61..08c692d 100644
70769 --- a/include/net/irda/ircomm_tty.h
70770 +++ b/include/net/irda/ircomm_tty.h
70771 @@ -35,6 +35,7 @@
70772 #include <linux/termios.h>
70773 #include <linux/timer.h>
70774 #include <linux/tty.h> /* struct tty_struct */
70775 +#include <asm/local.h>
70776
70777 #include <net/irda/irias_object.h>
70778 #include <net/irda/ircomm_core.h>
70779 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70780 unsigned short close_delay;
70781 unsigned short closing_wait; /* time to wait before closing */
70782
70783 - int open_count;
70784 - int blocked_open; /* # of blocked opens */
70785 + local_t open_count;
70786 + local_t blocked_open; /* # of blocked opens */
70787
70788 /* Protect concurent access to :
70789 * o self->open_count
70790 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70791 index f82a1e8..82d81e8 100644
70792 --- a/include/net/iucv/af_iucv.h
70793 +++ b/include/net/iucv/af_iucv.h
70794 @@ -87,7 +87,7 @@ struct iucv_sock {
70795 struct iucv_sock_list {
70796 struct hlist_head head;
70797 rwlock_t lock;
70798 - atomic_t autobind_name;
70799 + atomic_unchecked_t autobind_name;
70800 };
70801
70802 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70803 diff --git a/include/net/lapb.h b/include/net/lapb.h
70804 index 96cb5dd..25e8d4f 100644
70805 --- a/include/net/lapb.h
70806 +++ b/include/net/lapb.h
70807 @@ -95,7 +95,7 @@ struct lapb_cb {
70808 struct sk_buff_head write_queue;
70809 struct sk_buff_head ack_queue;
70810 unsigned char window;
70811 - struct lapb_register_struct callbacks;
70812 + struct lapb_register_struct *callbacks;
70813
70814 /* FRMR control information */
70815 struct lapb_frame frmr_data;
70816 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70817 index 3817fda..cdb2343 100644
70818 --- a/include/net/neighbour.h
70819 +++ b/include/net/neighbour.h
70820 @@ -131,7 +131,7 @@ struct neigh_ops
70821 int (*connected_output)(struct sk_buff*);
70822 int (*hh_output)(struct sk_buff*);
70823 int (*queue_xmit)(struct sk_buff*);
70824 -};
70825 +} __do_const;
70826
70827 struct pneigh_entry
70828 {
70829 diff --git a/include/net/netlink.h b/include/net/netlink.h
70830 index c344646..4778c71 100644
70831 --- a/include/net/netlink.h
70832 +++ b/include/net/netlink.h
70833 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70834 {
70835 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70836 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70837 - nlh->nlmsg_len <= remaining);
70838 + nlh->nlmsg_len <= (unsigned int)remaining);
70839 }
70840
70841 /**
70842 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70843 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70844 {
70845 if (mark)
70846 - skb_trim(skb, (unsigned char *) mark - skb->data);
70847 + skb_trim(skb, (const unsigned char *) mark - skb->data);
70848 }
70849
70850 /**
70851 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70852 index 9a4b8b7..e49e077 100644
70853 --- a/include/net/netns/ipv4.h
70854 +++ b/include/net/netns/ipv4.h
70855 @@ -54,7 +54,7 @@ struct netns_ipv4 {
70856 int current_rt_cache_rebuild_count;
70857
70858 struct timer_list rt_secret_timer;
70859 - atomic_t rt_genid;
70860 + atomic_unchecked_t rt_genid;
70861
70862 #ifdef CONFIG_IP_MROUTE
70863 struct sock *mroute_sk;
70864 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70865 index 8a6d529..171f401 100644
70866 --- a/include/net/sctp/sctp.h
70867 +++ b/include/net/sctp/sctp.h
70868 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70869
70870 #else /* SCTP_DEBUG */
70871
70872 -#define SCTP_DEBUG_PRINTK(whatever...)
70873 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70874 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70875 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70876 #define SCTP_ENABLE_DEBUG
70877 #define SCTP_DISABLE_DEBUG
70878 #define SCTP_ASSERT(expr, str, func)
70879 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70880 index d97f689..f3b90ab 100644
70881 --- a/include/net/secure_seq.h
70882 +++ b/include/net/secure_seq.h
70883 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70884 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70885 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70886 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70887 - __be16 dport);
70888 + __be16 dport);
70889 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70890 __be16 sport, __be16 dport);
70891 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70892 - __be16 sport, __be16 dport);
70893 + __be16 sport, __be16 dport);
70894 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70895 - __be16 sport, __be16 dport);
70896 + __be16 sport, __be16 dport);
70897 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70898 - __be16 sport, __be16 dport);
70899 + __be16 sport, __be16 dport);
70900
70901 #endif /* _NET_SECURE_SEQ */
70902 diff --git a/include/net/sock.h b/include/net/sock.h
70903 index 78adf52..99afd29 100644
70904 --- a/include/net/sock.h
70905 +++ b/include/net/sock.h
70906 @@ -272,7 +272,7 @@ struct sock {
70907 rwlock_t sk_callback_lock;
70908 int sk_err,
70909 sk_err_soft;
70910 - atomic_t sk_drops;
70911 + atomic_unchecked_t sk_drops;
70912 unsigned short sk_ack_backlog;
70913 unsigned short sk_max_ack_backlog;
70914 __u32 sk_priority;
70915 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70916 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70917 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70918 #else
70919 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70920 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70921 int inc)
70922 {
70923 }
70924 diff --git a/include/net/tcp.h b/include/net/tcp.h
70925 index 6cfe18b..dd21acb 100644
70926 --- a/include/net/tcp.h
70927 +++ b/include/net/tcp.h
70928 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70929 struct tcp_seq_afinfo {
70930 char *name;
70931 sa_family_t family;
70932 - struct file_operations seq_fops;
70933 - struct seq_operations seq_ops;
70934 + file_operations_no_const seq_fops;
70935 + seq_operations_no_const seq_ops;
70936 };
70937
70938 struct tcp_iter_state {
70939 diff --git a/include/net/udp.h b/include/net/udp.h
70940 index f98abd2..b4b042f 100644
70941 --- a/include/net/udp.h
70942 +++ b/include/net/udp.h
70943 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70944 char *name;
70945 sa_family_t family;
70946 struct udp_table *udp_table;
70947 - struct file_operations seq_fops;
70948 - struct seq_operations seq_ops;
70949 + file_operations_no_const seq_fops;
70950 + seq_operations_no_const seq_ops;
70951 };
70952
70953 struct udp_iter_state {
70954 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70955 index cbb822e..e9c1cbe 100644
70956 --- a/include/rdma/iw_cm.h
70957 +++ b/include/rdma/iw_cm.h
70958 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
70959 int backlog);
70960
70961 int (*destroy_listen)(struct iw_cm_id *cm_id);
70962 -};
70963 +} __no_const;
70964
70965 /**
70966 * iw_create_cm_id - Create an IW CM identifier.
70967 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70968 index 09a124b..caa8ca8 100644
70969 --- a/include/scsi/libfc.h
70970 +++ b/include/scsi/libfc.h
70971 @@ -675,6 +675,7 @@ struct libfc_function_template {
70972 */
70973 void (*disc_stop_final) (struct fc_lport *);
70974 };
70975 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70976
70977 /* information used by the discovery layer */
70978 struct fc_disc {
70979 @@ -707,7 +708,7 @@ struct fc_lport {
70980 struct fc_disc disc;
70981
70982 /* Operational Information */
70983 - struct libfc_function_template tt;
70984 + libfc_function_template_no_const tt;
70985 u8 link_up;
70986 u8 qfull;
70987 enum fc_lport_state state;
70988 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70989 index de8e180..f15e0d7 100644
70990 --- a/include/scsi/scsi_device.h
70991 +++ b/include/scsi/scsi_device.h
70992 @@ -156,9 +156,9 @@ struct scsi_device {
70993 unsigned int max_device_blocked; /* what device_blocked counts down from */
70994 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70995
70996 - atomic_t iorequest_cnt;
70997 - atomic_t iodone_cnt;
70998 - atomic_t ioerr_cnt;
70999 + atomic_unchecked_t iorequest_cnt;
71000 + atomic_unchecked_t iodone_cnt;
71001 + atomic_unchecked_t ioerr_cnt;
71002
71003 struct device sdev_gendev,
71004 sdev_dev;
71005 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
71006 index fc50bd6..81ba9cb 100644
71007 --- a/include/scsi/scsi_transport_fc.h
71008 +++ b/include/scsi/scsi_transport_fc.h
71009 @@ -708,7 +708,7 @@ struct fc_function_template {
71010 unsigned long show_host_system_hostname:1;
71011
71012 unsigned long disable_target_scan:1;
71013 -};
71014 +} __do_const;
71015
71016
71017 /**
71018 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
71019 index 3dae3f7..8440d6f 100644
71020 --- a/include/sound/ac97_codec.h
71021 +++ b/include/sound/ac97_codec.h
71022 @@ -419,15 +419,15 @@
71023 struct snd_ac97;
71024
71025 struct snd_ac97_build_ops {
71026 - int (*build_3d) (struct snd_ac97 *ac97);
71027 - int (*build_specific) (struct snd_ac97 *ac97);
71028 - int (*build_spdif) (struct snd_ac97 *ac97);
71029 - int (*build_post_spdif) (struct snd_ac97 *ac97);
71030 + int (* const build_3d) (struct snd_ac97 *ac97);
71031 + int (* const build_specific) (struct snd_ac97 *ac97);
71032 + int (* const build_spdif) (struct snd_ac97 *ac97);
71033 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
71034 #ifdef CONFIG_PM
71035 - void (*suspend) (struct snd_ac97 *ac97);
71036 - void (*resume) (struct snd_ac97 *ac97);
71037 + void (* const suspend) (struct snd_ac97 *ac97);
71038 + void (* const resume) (struct snd_ac97 *ac97);
71039 #endif
71040 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
71041 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
71042 };
71043
71044 struct snd_ac97_bus_ops {
71045 @@ -477,7 +477,7 @@ struct snd_ac97_template {
71046
71047 struct snd_ac97 {
71048 /* -- lowlevel (hardware) driver specific -- */
71049 - struct snd_ac97_build_ops * build_ops;
71050 + const struct snd_ac97_build_ops * build_ops;
71051 void *private_data;
71052 void (*private_free) (struct snd_ac97 *ac97);
71053 /* --- */
71054 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
71055 index 891cf1a..a94ba2b 100644
71056 --- a/include/sound/ak4xxx-adda.h
71057 +++ b/include/sound/ak4xxx-adda.h
71058 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
71059 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
71060 unsigned char val);
71061 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
71062 -};
71063 +} __no_const;
71064
71065 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
71066
71067 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
71068 index 8c05e47..2b5df97 100644
71069 --- a/include/sound/hwdep.h
71070 +++ b/include/sound/hwdep.h
71071 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
71072 struct snd_hwdep_dsp_status *status);
71073 int (*dsp_load)(struct snd_hwdep *hw,
71074 struct snd_hwdep_dsp_image *image);
71075 -};
71076 +} __no_const;
71077
71078 struct snd_hwdep {
71079 struct snd_card *card;
71080 diff --git a/include/sound/info.h b/include/sound/info.h
71081 index 112e894..6fda5b5 100644
71082 --- a/include/sound/info.h
71083 +++ b/include/sound/info.h
71084 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
71085 struct snd_info_buffer *buffer);
71086 void (*write)(struct snd_info_entry *entry,
71087 struct snd_info_buffer *buffer);
71088 -};
71089 +} __no_const;
71090
71091 struct snd_info_entry_ops {
71092 int (*open)(struct snd_info_entry *entry,
71093 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
71094 index de6d981..590a550 100644
71095 --- a/include/sound/pcm.h
71096 +++ b/include/sound/pcm.h
71097 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
71098 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
71099 int (*ack)(struct snd_pcm_substream *substream);
71100 };
71101 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
71102
71103 /*
71104 *
71105 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
71106 index 736eac7..fe8a80f 100644
71107 --- a/include/sound/sb16_csp.h
71108 +++ b/include/sound/sb16_csp.h
71109 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
71110 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
71111 int (*csp_stop) (struct snd_sb_csp * p);
71112 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
71113 -};
71114 +} __no_const;
71115
71116 /*
71117 * CSP private data
71118 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
71119 index 444cd6b..3327cc5 100644
71120 --- a/include/sound/ymfpci.h
71121 +++ b/include/sound/ymfpci.h
71122 @@ -358,7 +358,7 @@ struct snd_ymfpci {
71123 spinlock_t reg_lock;
71124 spinlock_t voice_lock;
71125 wait_queue_head_t interrupt_sleep;
71126 - atomic_t interrupt_sleep_count;
71127 + atomic_unchecked_t interrupt_sleep_count;
71128 struct snd_info_entry *proc_entry;
71129 const struct firmware *dsp_microcode;
71130 const struct firmware *controller_microcode;
71131 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
71132 index b89f9db..f097b38 100644
71133 --- a/include/trace/events/irq.h
71134 +++ b/include/trace/events/irq.h
71135 @@ -34,7 +34,7 @@
71136 */
71137 TRACE_EVENT(irq_handler_entry,
71138
71139 - TP_PROTO(int irq, struct irqaction *action),
71140 + TP_PROTO(int irq, const struct irqaction *action),
71141
71142 TP_ARGS(irq, action),
71143
71144 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
71145 */
71146 TRACE_EVENT(irq_handler_exit,
71147
71148 - TP_PROTO(int irq, struct irqaction *action, int ret),
71149 + TP_PROTO(int irq, const struct irqaction *action, int ret),
71150
71151 TP_ARGS(irq, action, ret),
71152
71153 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
71154 */
71155 TRACE_EVENT(softirq_entry,
71156
71157 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71158 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71159
71160 TP_ARGS(h, vec),
71161
71162 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
71163 */
71164 TRACE_EVENT(softirq_exit,
71165
71166 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71167 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71168
71169 TP_ARGS(h, vec),
71170
71171 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
71172 index 0993a22..32ba2fe 100644
71173 --- a/include/video/uvesafb.h
71174 +++ b/include/video/uvesafb.h
71175 @@ -177,6 +177,7 @@ struct uvesafb_par {
71176 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
71177 u8 pmi_setpal; /* PMI for palette changes */
71178 u16 *pmi_base; /* protected mode interface location */
71179 + u8 *pmi_code; /* protected mode code location */
71180 void *pmi_start;
71181 void *pmi_pal;
71182 u8 *vbe_state_orig; /*
71183 diff --git a/init/Kconfig b/init/Kconfig
71184 index d72691b..3996e54 100644
71185 --- a/init/Kconfig
71186 +++ b/init/Kconfig
71187 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
71188
71189 config COMPAT_BRK
71190 bool "Disable heap randomization"
71191 - default y
71192 + default n
71193 help
71194 Randomizing heap placement makes heap exploits harder, but it
71195 also breaks ancient binaries (including anything libc5 based).
71196 diff --git a/init/do_mounts.c b/init/do_mounts.c
71197 index bb008d0..4fa3933 100644
71198 --- a/init/do_mounts.c
71199 +++ b/init/do_mounts.c
71200 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
71201
71202 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
71203 {
71204 - int err = sys_mount(name, "/root", fs, flags, data);
71205 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
71206 if (err)
71207 return err;
71208
71209 - sys_chdir("/root");
71210 + sys_chdir((__force const char __user *)"/root");
71211 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
71212 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
71213 current->fs->pwd.mnt->mnt_sb->s_type->name,
71214 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
71215 va_start(args, fmt);
71216 vsprintf(buf, fmt, args);
71217 va_end(args);
71218 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
71219 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
71220 if (fd >= 0) {
71221 sys_ioctl(fd, FDEJECT, 0);
71222 sys_close(fd);
71223 }
71224 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
71225 - fd = sys_open("/dev/console", O_RDWR, 0);
71226 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
71227 if (fd >= 0) {
71228 sys_ioctl(fd, TCGETS, (long)&termios);
71229 termios.c_lflag &= ~ICANON;
71230 sys_ioctl(fd, TCSETSF, (long)&termios);
71231 - sys_read(fd, &c, 1);
71232 + sys_read(fd, (char __user *)&c, 1);
71233 termios.c_lflag |= ICANON;
71234 sys_ioctl(fd, TCSETSF, (long)&termios);
71235 sys_close(fd);
71236 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
71237 mount_root();
71238 out:
71239 devtmpfs_mount("dev");
71240 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71241 - sys_chroot(".");
71242 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
71243 + sys_chroot((__force char __user *)".");
71244 }
71245 diff --git a/init/do_mounts.h b/init/do_mounts.h
71246 index f5b978a..69dbfe8 100644
71247 --- a/init/do_mounts.h
71248 +++ b/init/do_mounts.h
71249 @@ -15,15 +15,15 @@ extern int root_mountflags;
71250
71251 static inline int create_dev(char *name, dev_t dev)
71252 {
71253 - sys_unlink(name);
71254 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
71255 + sys_unlink((char __force_user *)name);
71256 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
71257 }
71258
71259 #if BITS_PER_LONG == 32
71260 static inline u32 bstat(char *name)
71261 {
71262 struct stat64 stat;
71263 - if (sys_stat64(name, &stat) != 0)
71264 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
71265 return 0;
71266 if (!S_ISBLK(stat.st_mode))
71267 return 0;
71268 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
71269 static inline u32 bstat(char *name)
71270 {
71271 struct stat stat;
71272 - if (sys_newstat(name, &stat) != 0)
71273 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71274 return 0;
71275 if (!S_ISBLK(stat.st_mode))
71276 return 0;
71277 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71278 index 614241b..4da046b 100644
71279 --- a/init/do_mounts_initrd.c
71280 +++ b/init/do_mounts_initrd.c
71281 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71282 sys_close(old_fd);sys_close(root_fd);
71283 sys_close(0);sys_close(1);sys_close(2);
71284 sys_setsid();
71285 - (void) sys_open("/dev/console",O_RDWR,0);
71286 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71287 (void) sys_dup(0);
71288 (void) sys_dup(0);
71289 return kernel_execve(shell, argv, envp_init);
71290 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71291 create_dev("/dev/root.old", Root_RAM0);
71292 /* mount initrd on rootfs' /root */
71293 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71294 - sys_mkdir("/old", 0700);
71295 - root_fd = sys_open("/", 0, 0);
71296 - old_fd = sys_open("/old", 0, 0);
71297 + sys_mkdir((const char __force_user *)"/old", 0700);
71298 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
71299 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71300 /* move initrd over / and chdir/chroot in initrd root */
71301 - sys_chdir("/root");
71302 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71303 - sys_chroot(".");
71304 + sys_chdir((const char __force_user *)"/root");
71305 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71306 + sys_chroot((const char __force_user *)".");
71307
71308 /*
71309 * In case that a resume from disk is carried out by linuxrc or one of
71310 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71311
71312 /* move initrd to rootfs' /old */
71313 sys_fchdir(old_fd);
71314 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
71315 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71316 /* switch root and cwd back to / of rootfs */
71317 sys_fchdir(root_fd);
71318 - sys_chroot(".");
71319 + sys_chroot((const char __force_user *)".");
71320 sys_close(old_fd);
71321 sys_close(root_fd);
71322
71323 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71324 - sys_chdir("/old");
71325 + sys_chdir((const char __force_user *)"/old");
71326 return;
71327 }
71328
71329 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71330 mount_root();
71331
71332 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71333 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71334 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71335 if (!error)
71336 printk("okay\n");
71337 else {
71338 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
71339 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71340 if (error == -ENOENT)
71341 printk("/initrd does not exist. Ignored.\n");
71342 else
71343 printk("failed\n");
71344 printk(KERN_NOTICE "Unmounting old root\n");
71345 - sys_umount("/old", MNT_DETACH);
71346 + sys_umount((char __force_user *)"/old", MNT_DETACH);
71347 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71348 if (fd < 0) {
71349 error = fd;
71350 @@ -119,11 +119,11 @@ int __init initrd_load(void)
71351 * mounted in the normal path.
71352 */
71353 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71354 - sys_unlink("/initrd.image");
71355 + sys_unlink((const char __force_user *)"/initrd.image");
71356 handle_initrd();
71357 return 1;
71358 }
71359 }
71360 - sys_unlink("/initrd.image");
71361 + sys_unlink((const char __force_user *)"/initrd.image");
71362 return 0;
71363 }
71364 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71365 index 69aebbf..c0bf6a7 100644
71366 --- a/init/do_mounts_md.c
71367 +++ b/init/do_mounts_md.c
71368 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71369 partitioned ? "_d" : "", minor,
71370 md_setup_args[ent].device_names);
71371
71372 - fd = sys_open(name, 0, 0);
71373 + fd = sys_open((char __force_user *)name, 0, 0);
71374 if (fd < 0) {
71375 printk(KERN_ERR "md: open failed - cannot start "
71376 "array %s\n", name);
71377 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71378 * array without it
71379 */
71380 sys_close(fd);
71381 - fd = sys_open(name, 0, 0);
71382 + fd = sys_open((char __force_user *)name, 0, 0);
71383 sys_ioctl(fd, BLKRRPART, 0);
71384 }
71385 sys_close(fd);
71386 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71387
71388 wait_for_device_probe();
71389
71390 - fd = sys_open("/dev/md0", 0, 0);
71391 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71392 if (fd >= 0) {
71393 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71394 sys_close(fd);
71395 diff --git a/init/initramfs.c b/init/initramfs.c
71396 index 1fd59b8..a01b079 100644
71397 --- a/init/initramfs.c
71398 +++ b/init/initramfs.c
71399 @@ -74,7 +74,7 @@ static void __init free_hash(void)
71400 }
71401 }
71402
71403 -static long __init do_utime(char __user *filename, time_t mtime)
71404 +static long __init do_utime(__force char __user *filename, time_t mtime)
71405 {
71406 struct timespec t[2];
71407
71408 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
71409 struct dir_entry *de, *tmp;
71410 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71411 list_del(&de->list);
71412 - do_utime(de->name, de->mtime);
71413 + do_utime((char __force_user *)de->name, de->mtime);
71414 kfree(de->name);
71415 kfree(de);
71416 }
71417 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
71418 if (nlink >= 2) {
71419 char *old = find_link(major, minor, ino, mode, collected);
71420 if (old)
71421 - return (sys_link(old, collected) < 0) ? -1 : 1;
71422 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71423 }
71424 return 0;
71425 }
71426 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71427 {
71428 struct stat st;
71429
71430 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
71431 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
71432 if (S_ISDIR(st.st_mode))
71433 - sys_rmdir(path);
71434 + sys_rmdir((char __force_user *)path);
71435 else
71436 - sys_unlink(path);
71437 + sys_unlink((char __force_user *)path);
71438 }
71439 }
71440
71441 @@ -305,7 +305,7 @@ static int __init do_name(void)
71442 int openflags = O_WRONLY|O_CREAT;
71443 if (ml != 1)
71444 openflags |= O_TRUNC;
71445 - wfd = sys_open(collected, openflags, mode);
71446 + wfd = sys_open((char __force_user *)collected, openflags, mode);
71447
71448 if (wfd >= 0) {
71449 sys_fchown(wfd, uid, gid);
71450 @@ -317,17 +317,17 @@ static int __init do_name(void)
71451 }
71452 }
71453 } else if (S_ISDIR(mode)) {
71454 - sys_mkdir(collected, mode);
71455 - sys_chown(collected, uid, gid);
71456 - sys_chmod(collected, mode);
71457 + sys_mkdir((char __force_user *)collected, mode);
71458 + sys_chown((char __force_user *)collected, uid, gid);
71459 + sys_chmod((char __force_user *)collected, mode);
71460 dir_add(collected, mtime);
71461 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
71462 S_ISFIFO(mode) || S_ISSOCK(mode)) {
71463 if (maybe_link() == 0) {
71464 - sys_mknod(collected, mode, rdev);
71465 - sys_chown(collected, uid, gid);
71466 - sys_chmod(collected, mode);
71467 - do_utime(collected, mtime);
71468 + sys_mknod((char __force_user *)collected, mode, rdev);
71469 + sys_chown((char __force_user *)collected, uid, gid);
71470 + sys_chmod((char __force_user *)collected, mode);
71471 + do_utime((char __force_user *)collected, mtime);
71472 }
71473 }
71474 return 0;
71475 @@ -336,15 +336,15 @@ static int __init do_name(void)
71476 static int __init do_copy(void)
71477 {
71478 if (count >= body_len) {
71479 - sys_write(wfd, victim, body_len);
71480 + sys_write(wfd, (char __force_user *)victim, body_len);
71481 sys_close(wfd);
71482 - do_utime(vcollected, mtime);
71483 + do_utime((char __force_user *)vcollected, mtime);
71484 kfree(vcollected);
71485 eat(body_len);
71486 state = SkipIt;
71487 return 0;
71488 } else {
71489 - sys_write(wfd, victim, count);
71490 + sys_write(wfd, (char __force_user *)victim, count);
71491 body_len -= count;
71492 eat(count);
71493 return 1;
71494 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
71495 {
71496 collected[N_ALIGN(name_len) + body_len] = '\0';
71497 clean_path(collected, 0);
71498 - sys_symlink(collected + N_ALIGN(name_len), collected);
71499 - sys_lchown(collected, uid, gid);
71500 - do_utime(collected, mtime);
71501 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
71502 + sys_lchown((char __force_user *)collected, uid, gid);
71503 + do_utime((char __force_user *)collected, mtime);
71504 state = SkipIt;
71505 next_state = Reset;
71506 return 0;
71507 diff --git a/init/main.c b/init/main.c
71508 index 1eb4bd5..fea5bbe 100644
71509 --- a/init/main.c
71510 +++ b/init/main.c
71511 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
71512 #ifdef CONFIG_TC
71513 extern void tc_init(void);
71514 #endif
71515 +extern void grsecurity_init(void);
71516
71517 enum system_states system_state __read_mostly;
71518 EXPORT_SYMBOL(system_state);
71519 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
71520
71521 __setup("reset_devices", set_reset_devices);
71522
71523 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
71524 +extern char pax_enter_kernel_user[];
71525 +extern char pax_exit_kernel_user[];
71526 +extern pgdval_t clone_pgd_mask;
71527 +#endif
71528 +
71529 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
71530 +static int __init setup_pax_nouderef(char *str)
71531 +{
71532 +#ifdef CONFIG_X86_32
71533 + unsigned int cpu;
71534 + struct desc_struct *gdt;
71535 +
71536 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
71537 + gdt = get_cpu_gdt_table(cpu);
71538 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
71539 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
71540 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
71541 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
71542 + }
71543 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
71544 +#else
71545 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
71546 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
71547 + clone_pgd_mask = ~(pgdval_t)0UL;
71548 +#endif
71549 +
71550 + return 0;
71551 +}
71552 +early_param("pax_nouderef", setup_pax_nouderef);
71553 +#endif
71554 +
71555 +#ifdef CONFIG_PAX_SOFTMODE
71556 +int pax_softmode;
71557 +
71558 +static int __init setup_pax_softmode(char *str)
71559 +{
71560 + get_option(&str, &pax_softmode);
71561 + return 1;
71562 +}
71563 +__setup("pax_softmode=", setup_pax_softmode);
71564 +#endif
71565 +
71566 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
71567 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
71568 static const char *panic_later, *panic_param;
71569 @@ -705,52 +749,53 @@ int initcall_debug;
71570 core_param(initcall_debug, initcall_debug, bool, 0644);
71571
71572 static char msgbuf[64];
71573 -static struct boot_trace_call call;
71574 -static struct boot_trace_ret ret;
71575 +static struct boot_trace_call trace_call;
71576 +static struct boot_trace_ret trace_ret;
71577
71578 int do_one_initcall(initcall_t fn)
71579 {
71580 int count = preempt_count();
71581 ktime_t calltime, delta, rettime;
71582 + const char *msg1 = "", *msg2 = "";
71583
71584 if (initcall_debug) {
71585 - call.caller = task_pid_nr(current);
71586 - printk("calling %pF @ %i\n", fn, call.caller);
71587 + trace_call.caller = task_pid_nr(current);
71588 + printk("calling %pF @ %i\n", fn, trace_call.caller);
71589 calltime = ktime_get();
71590 - trace_boot_call(&call, fn);
71591 + trace_boot_call(&trace_call, fn);
71592 enable_boot_trace();
71593 }
71594
71595 - ret.result = fn();
71596 + trace_ret.result = fn();
71597
71598 if (initcall_debug) {
71599 disable_boot_trace();
71600 rettime = ktime_get();
71601 delta = ktime_sub(rettime, calltime);
71602 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71603 - trace_boot_ret(&ret, fn);
71604 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71605 + trace_boot_ret(&trace_ret, fn);
71606 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71607 - ret.result, ret.duration);
71608 + trace_ret.result, trace_ret.duration);
71609 }
71610
71611 msgbuf[0] = 0;
71612
71613 - if (ret.result && ret.result != -ENODEV && initcall_debug)
71614 - sprintf(msgbuf, "error code %d ", ret.result);
71615 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71616 + sprintf(msgbuf, "error code %d ", trace_ret.result);
71617
71618 if (preempt_count() != count) {
71619 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71620 + msg1 = " preemption imbalance";
71621 preempt_count() = count;
71622 }
71623 if (irqs_disabled()) {
71624 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71625 + msg2 = " disabled interrupts";
71626 local_irq_enable();
71627 }
71628 - if (msgbuf[0]) {
71629 - printk("initcall %pF returned with %s\n", fn, msgbuf);
71630 + if (msgbuf[0] || *msg1 || *msg2) {
71631 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71632 }
71633
71634 - return ret.result;
71635 + return trace_ret.result;
71636 }
71637
71638
71639 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71640 if (!ramdisk_execute_command)
71641 ramdisk_execute_command = "/init";
71642
71643 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71644 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71645 ramdisk_execute_command = NULL;
71646 prepare_namespace();
71647 }
71648
71649 + grsecurity_init();
71650 +
71651 /*
71652 * Ok, we have completed the initial bootup, and
71653 * we're essentially up and running. Get rid of the
71654 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71655 index f4c1a3a..96c19bd 100644
71656 --- a/init/noinitramfs.c
71657 +++ b/init/noinitramfs.c
71658 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71659 {
71660 int err;
71661
71662 - err = sys_mkdir("/dev", 0755);
71663 + err = sys_mkdir((const char __user *)"/dev", 0755);
71664 if (err < 0)
71665 goto out;
71666
71667 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71668 if (err < 0)
71669 goto out;
71670
71671 - err = sys_mkdir("/root", 0700);
71672 + err = sys_mkdir((const char __user *)"/root", 0700);
71673 if (err < 0)
71674 goto out;
71675
71676 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71677 index d01bc14..8df81db 100644
71678 --- a/ipc/mqueue.c
71679 +++ b/ipc/mqueue.c
71680 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71681 mq_bytes = (mq_msg_tblsz +
71682 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71683
71684 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71685 spin_lock(&mq_lock);
71686 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71687 u->mq_bytes + mq_bytes >
71688 diff --git a/ipc/msg.c b/ipc/msg.c
71689 index 779f762..4af9e36 100644
71690 --- a/ipc/msg.c
71691 +++ b/ipc/msg.c
71692 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71693 return security_msg_queue_associate(msq, msgflg);
71694 }
71695
71696 +static struct ipc_ops msg_ops = {
71697 + .getnew = newque,
71698 + .associate = msg_security,
71699 + .more_checks = NULL
71700 +};
71701 +
71702 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71703 {
71704 struct ipc_namespace *ns;
71705 - struct ipc_ops msg_ops;
71706 struct ipc_params msg_params;
71707
71708 ns = current->nsproxy->ipc_ns;
71709
71710 - msg_ops.getnew = newque;
71711 - msg_ops.associate = msg_security;
71712 - msg_ops.more_checks = NULL;
71713 -
71714 msg_params.key = key;
71715 msg_params.flg = msgflg;
71716
71717 diff --git a/ipc/sem.c b/ipc/sem.c
71718 index b781007..f738b04 100644
71719 --- a/ipc/sem.c
71720 +++ b/ipc/sem.c
71721 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71722 return 0;
71723 }
71724
71725 +static struct ipc_ops sem_ops = {
71726 + .getnew = newary,
71727 + .associate = sem_security,
71728 + .more_checks = sem_more_checks
71729 +};
71730 +
71731 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71732 {
71733 struct ipc_namespace *ns;
71734 - struct ipc_ops sem_ops;
71735 struct ipc_params sem_params;
71736
71737 ns = current->nsproxy->ipc_ns;
71738 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71739 if (nsems < 0 || nsems > ns->sc_semmsl)
71740 return -EINVAL;
71741
71742 - sem_ops.getnew = newary;
71743 - sem_ops.associate = sem_security;
71744 - sem_ops.more_checks = sem_more_checks;
71745 -
71746 sem_params.key = key;
71747 sem_params.flg = semflg;
71748 sem_params.u.nsems = nsems;
71749 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71750 ushort* sem_io = fast_sem_io;
71751 int nsems;
71752
71753 + pax_track_stack();
71754 +
71755 sma = sem_lock_check(ns, semid);
71756 if (IS_ERR(sma))
71757 return PTR_ERR(sma);
71758 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71759 unsigned long jiffies_left = 0;
71760 struct ipc_namespace *ns;
71761
71762 + pax_track_stack();
71763 +
71764 ns = current->nsproxy->ipc_ns;
71765
71766 if (nsops < 1 || semid < 0)
71767 diff --git a/ipc/shm.c b/ipc/shm.c
71768 index d30732c..e4992cd 100644
71769 --- a/ipc/shm.c
71770 +++ b/ipc/shm.c
71771 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71772 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71773 #endif
71774
71775 +#ifdef CONFIG_GRKERNSEC
71776 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71777 + const time_t shm_createtime, const uid_t cuid,
71778 + const int shmid);
71779 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71780 + const time_t shm_createtime);
71781 +#endif
71782 +
71783 void shm_init_ns(struct ipc_namespace *ns)
71784 {
71785 ns->shm_ctlmax = SHMMAX;
71786 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71787 shp->shm_lprid = 0;
71788 shp->shm_atim = shp->shm_dtim = 0;
71789 shp->shm_ctim = get_seconds();
71790 +#ifdef CONFIG_GRKERNSEC
71791 + {
71792 + struct timespec timeval;
71793 + do_posix_clock_monotonic_gettime(&timeval);
71794 +
71795 + shp->shm_createtime = timeval.tv_sec;
71796 + }
71797 +#endif
71798 shp->shm_segsz = size;
71799 shp->shm_nattch = 0;
71800 shp->shm_file = file;
71801 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71802 return 0;
71803 }
71804
71805 +static struct ipc_ops shm_ops = {
71806 + .getnew = newseg,
71807 + .associate = shm_security,
71808 + .more_checks = shm_more_checks
71809 +};
71810 +
71811 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71812 {
71813 struct ipc_namespace *ns;
71814 - struct ipc_ops shm_ops;
71815 struct ipc_params shm_params;
71816
71817 ns = current->nsproxy->ipc_ns;
71818
71819 - shm_ops.getnew = newseg;
71820 - shm_ops.associate = shm_security;
71821 - shm_ops.more_checks = shm_more_checks;
71822 -
71823 shm_params.key = key;
71824 shm_params.flg = shmflg;
71825 shm_params.u.size = size;
71826 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71827 f_mode = FMODE_READ | FMODE_WRITE;
71828 }
71829 if (shmflg & SHM_EXEC) {
71830 +
71831 +#ifdef CONFIG_PAX_MPROTECT
71832 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
71833 + goto out;
71834 +#endif
71835 +
71836 prot |= PROT_EXEC;
71837 acc_mode |= S_IXUGO;
71838 }
71839 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71840 if (err)
71841 goto out_unlock;
71842
71843 +#ifdef CONFIG_GRKERNSEC
71844 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71845 + shp->shm_perm.cuid, shmid) ||
71846 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71847 + err = -EACCES;
71848 + goto out_unlock;
71849 + }
71850 +#endif
71851 +
71852 path.dentry = dget(shp->shm_file->f_path.dentry);
71853 path.mnt = shp->shm_file->f_path.mnt;
71854 shp->shm_nattch++;
71855 +#ifdef CONFIG_GRKERNSEC
71856 + shp->shm_lapid = current->pid;
71857 +#endif
71858 size = i_size_read(path.dentry->d_inode);
71859 shm_unlock(shp);
71860
71861 diff --git a/kernel/acct.c b/kernel/acct.c
71862 index a6605ca..ca91111 100644
71863 --- a/kernel/acct.c
71864 +++ b/kernel/acct.c
71865 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71866 */
71867 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71868 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71869 - file->f_op->write(file, (char *)&ac,
71870 + file->f_op->write(file, (char __force_user *)&ac,
71871 sizeof(acct_t), &file->f_pos);
71872 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71873 set_fs(fs);
71874 diff --git a/kernel/audit.c b/kernel/audit.c
71875 index 5feed23..48415fd 100644
71876 --- a/kernel/audit.c
71877 +++ b/kernel/audit.c
71878 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71879 3) suppressed due to audit_rate_limit
71880 4) suppressed due to audit_backlog_limit
71881 */
71882 -static atomic_t audit_lost = ATOMIC_INIT(0);
71883 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71884
71885 /* The netlink socket. */
71886 static struct sock *audit_sock;
71887 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71888 unsigned long now;
71889 int print;
71890
71891 - atomic_inc(&audit_lost);
71892 + atomic_inc_unchecked(&audit_lost);
71893
71894 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71895
71896 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71897 printk(KERN_WARNING
71898 "audit: audit_lost=%d audit_rate_limit=%d "
71899 "audit_backlog_limit=%d\n",
71900 - atomic_read(&audit_lost),
71901 + atomic_read_unchecked(&audit_lost),
71902 audit_rate_limit,
71903 audit_backlog_limit);
71904 audit_panic(message);
71905 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71906 status_set.pid = audit_pid;
71907 status_set.rate_limit = audit_rate_limit;
71908 status_set.backlog_limit = audit_backlog_limit;
71909 - status_set.lost = atomic_read(&audit_lost);
71910 + status_set.lost = atomic_read_unchecked(&audit_lost);
71911 status_set.backlog = skb_queue_len(&audit_skb_queue);
71912 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71913 &status_set, sizeof(status_set));
71914 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71915 spin_unlock_irq(&tsk->sighand->siglock);
71916 }
71917 read_unlock(&tasklist_lock);
71918 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71919 - &s, sizeof(s));
71920 +
71921 + if (!err)
71922 + audit_send_reply(NETLINK_CB(skb).pid, seq,
71923 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71924 break;
71925 }
71926 case AUDIT_TTY_SET: {
71927 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71928 avail = audit_expand(ab,
71929 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71930 if (!avail)
71931 - goto out;
71932 + goto out_va_end;
71933 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71934 }
71935 - va_end(args2);
71936 if (len > 0)
71937 skb_put(skb, len);
71938 +out_va_end:
71939 + va_end(args2);
71940 out:
71941 return;
71942 }
71943 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71944 index 267e484..ac41bc3 100644
71945 --- a/kernel/auditsc.c
71946 +++ b/kernel/auditsc.c
71947 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71948 struct audit_buffer **ab,
71949 struct audit_aux_data_execve *axi)
71950 {
71951 - int i;
71952 - size_t len, len_sent = 0;
71953 + int i, len;
71954 + size_t len_sent = 0;
71955 const char __user *p;
71956 char *buf;
71957
71958 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71959 }
71960
71961 /* global counter which is incremented every time something logs in */
71962 -static atomic_t session_id = ATOMIC_INIT(0);
71963 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71964
71965 /**
71966 * audit_set_loginuid - set a task's audit_context loginuid
71967 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71968 */
71969 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71970 {
71971 - unsigned int sessionid = atomic_inc_return(&session_id);
71972 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71973 struct audit_context *context = task->audit_context;
71974
71975 if (context && context->in_syscall) {
71976 diff --git a/kernel/capability.c b/kernel/capability.c
71977 index 8a944f5..db5001e 100644
71978 --- a/kernel/capability.c
71979 +++ b/kernel/capability.c
71980 @@ -305,10 +305,26 @@ int capable(int cap)
71981 BUG();
71982 }
71983
71984 - if (security_capable(cap) == 0) {
71985 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71986 current->flags |= PF_SUPERPRIV;
71987 return 1;
71988 }
71989 return 0;
71990 }
71991 +
71992 +int capable_nolog(int cap)
71993 +{
71994 + if (unlikely(!cap_valid(cap))) {
71995 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71996 + BUG();
71997 + }
71998 +
71999 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
72000 + current->flags |= PF_SUPERPRIV;
72001 + return 1;
72002 + }
72003 + return 0;
72004 +}
72005 +
72006 EXPORT_SYMBOL(capable);
72007 +EXPORT_SYMBOL(capable_nolog);
72008 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
72009 index 1fbcc74..7000012 100644
72010 --- a/kernel/cgroup.c
72011 +++ b/kernel/cgroup.c
72012 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
72013 struct hlist_head *hhead;
72014 struct cg_cgroup_link *link;
72015
72016 + pax_track_stack();
72017 +
72018 /* First see if we already have a cgroup group that matches
72019 * the desired set */
72020 read_lock(&css_set_lock);
72021 diff --git a/kernel/compat.c b/kernel/compat.c
72022 index 8bc5578..186e44a 100644
72023 --- a/kernel/compat.c
72024 +++ b/kernel/compat.c
72025 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
72026 mm_segment_t oldfs;
72027 long ret;
72028
72029 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
72030 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
72031 oldfs = get_fs();
72032 set_fs(KERNEL_DS);
72033 ret = hrtimer_nanosleep_restart(restart);
72034 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
72035 oldfs = get_fs();
72036 set_fs(KERNEL_DS);
72037 ret = hrtimer_nanosleep(&tu,
72038 - rmtp ? (struct timespec __user *)&rmt : NULL,
72039 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
72040 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
72041 set_fs(oldfs);
72042
72043 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
72044 mm_segment_t old_fs = get_fs();
72045
72046 set_fs(KERNEL_DS);
72047 - ret = sys_sigpending((old_sigset_t __user *) &s);
72048 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
72049 set_fs(old_fs);
72050 if (ret == 0)
72051 ret = put_user(s, set);
72052 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
72053 old_fs = get_fs();
72054 set_fs(KERNEL_DS);
72055 ret = sys_sigprocmask(how,
72056 - set ? (old_sigset_t __user *) &s : NULL,
72057 - oset ? (old_sigset_t __user *) &s : NULL);
72058 + set ? (old_sigset_t __force_user *) &s : NULL,
72059 + oset ? (old_sigset_t __force_user *) &s : NULL);
72060 set_fs(old_fs);
72061 if (ret == 0)
72062 if (oset)
72063 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
72064 mm_segment_t old_fs = get_fs();
72065
72066 set_fs(KERNEL_DS);
72067 - ret = sys_old_getrlimit(resource, &r);
72068 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
72069 set_fs(old_fs);
72070
72071 if (!ret) {
72072 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
72073 mm_segment_t old_fs = get_fs();
72074
72075 set_fs(KERNEL_DS);
72076 - ret = sys_getrusage(who, (struct rusage __user *) &r);
72077 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
72078 set_fs(old_fs);
72079
72080 if (ret)
72081 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
72082 set_fs (KERNEL_DS);
72083 ret = sys_wait4(pid,
72084 (stat_addr ?
72085 - (unsigned int __user *) &status : NULL),
72086 - options, (struct rusage __user *) &r);
72087 + (unsigned int __force_user *) &status : NULL),
72088 + options, (struct rusage __force_user *) &r);
72089 set_fs (old_fs);
72090
72091 if (ret > 0) {
72092 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
72093 memset(&info, 0, sizeof(info));
72094
72095 set_fs(KERNEL_DS);
72096 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
72097 - uru ? (struct rusage __user *)&ru : NULL);
72098 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
72099 + uru ? (struct rusage __force_user *)&ru : NULL);
72100 set_fs(old_fs);
72101
72102 if ((ret < 0) || (info.si_signo == 0))
72103 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
72104 oldfs = get_fs();
72105 set_fs(KERNEL_DS);
72106 err = sys_timer_settime(timer_id, flags,
72107 - (struct itimerspec __user *) &newts,
72108 - (struct itimerspec __user *) &oldts);
72109 + (struct itimerspec __force_user *) &newts,
72110 + (struct itimerspec __force_user *) &oldts);
72111 set_fs(oldfs);
72112 if (!err && old && put_compat_itimerspec(old, &oldts))
72113 return -EFAULT;
72114 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
72115 oldfs = get_fs();
72116 set_fs(KERNEL_DS);
72117 err = sys_timer_gettime(timer_id,
72118 - (struct itimerspec __user *) &ts);
72119 + (struct itimerspec __force_user *) &ts);
72120 set_fs(oldfs);
72121 if (!err && put_compat_itimerspec(setting, &ts))
72122 return -EFAULT;
72123 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
72124 oldfs = get_fs();
72125 set_fs(KERNEL_DS);
72126 err = sys_clock_settime(which_clock,
72127 - (struct timespec __user *) &ts);
72128 + (struct timespec __force_user *) &ts);
72129 set_fs(oldfs);
72130 return err;
72131 }
72132 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
72133 oldfs = get_fs();
72134 set_fs(KERNEL_DS);
72135 err = sys_clock_gettime(which_clock,
72136 - (struct timespec __user *) &ts);
72137 + (struct timespec __force_user *) &ts);
72138 set_fs(oldfs);
72139 if (!err && put_compat_timespec(&ts, tp))
72140 return -EFAULT;
72141 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
72142 oldfs = get_fs();
72143 set_fs(KERNEL_DS);
72144 err = sys_clock_getres(which_clock,
72145 - (struct timespec __user *) &ts);
72146 + (struct timespec __force_user *) &ts);
72147 set_fs(oldfs);
72148 if (!err && tp && put_compat_timespec(&ts, tp))
72149 return -EFAULT;
72150 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
72151 long err;
72152 mm_segment_t oldfs;
72153 struct timespec tu;
72154 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
72155 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
72156
72157 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
72158 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
72159 oldfs = get_fs();
72160 set_fs(KERNEL_DS);
72161 err = clock_nanosleep_restart(restart);
72162 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
72163 oldfs = get_fs();
72164 set_fs(KERNEL_DS);
72165 err = sys_clock_nanosleep(which_clock, flags,
72166 - (struct timespec __user *) &in,
72167 - (struct timespec __user *) &out);
72168 + (struct timespec __force_user *) &in,
72169 + (struct timespec __force_user *) &out);
72170 set_fs(oldfs);
72171
72172 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
72173 diff --git a/kernel/configs.c b/kernel/configs.c
72174 index abaee68..047facd 100644
72175 --- a/kernel/configs.c
72176 +++ b/kernel/configs.c
72177 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
72178 struct proc_dir_entry *entry;
72179
72180 /* create the current config file */
72181 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72182 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
72183 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
72184 + &ikconfig_file_ops);
72185 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72186 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
72187 + &ikconfig_file_ops);
72188 +#endif
72189 +#else
72190 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
72191 &ikconfig_file_ops);
72192 +#endif
72193 +
72194 if (!entry)
72195 return -ENOMEM;
72196
72197 diff --git a/kernel/cpu.c b/kernel/cpu.c
72198 index 3f2f04f..4e53ded 100644
72199 --- a/kernel/cpu.c
72200 +++ b/kernel/cpu.c
72201 @@ -20,7 +20,7 @@
72202 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
72203 static DEFINE_MUTEX(cpu_add_remove_lock);
72204
72205 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
72206 +static RAW_NOTIFIER_HEAD(cpu_chain);
72207
72208 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
72209 * Should always be manipulated under cpu_add_remove_lock
72210 diff --git a/kernel/cred.c b/kernel/cred.c
72211 index 0b5b5fc..f7fe51a 100644
72212 --- a/kernel/cred.c
72213 +++ b/kernel/cred.c
72214 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
72215 */
72216 void __put_cred(struct cred *cred)
72217 {
72218 + pax_track_stack();
72219 +
72220 kdebug("__put_cred(%p{%d,%d})", cred,
72221 atomic_read(&cred->usage),
72222 read_cred_subscribers(cred));
72223 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
72224 {
72225 struct cred *cred;
72226
72227 + pax_track_stack();
72228 +
72229 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
72230 atomic_read(&tsk->cred->usage),
72231 read_cred_subscribers(tsk->cred));
72232 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
72233 validate_creds(cred);
72234 put_cred(cred);
72235 }
72236 +
72237 +#ifdef CONFIG_GRKERNSEC_SETXID
72238 + cred = (struct cred *) tsk->delayed_cred;
72239 + if (cred) {
72240 + tsk->delayed_cred = NULL;
72241 + validate_creds(cred);
72242 + put_cred(cred);
72243 + }
72244 +#endif
72245 }
72246
72247 /**
72248 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
72249 {
72250 const struct cred *cred;
72251
72252 + pax_track_stack();
72253 +
72254 rcu_read_lock();
72255
72256 do {
72257 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
72258 {
72259 struct cred *new;
72260
72261 + pax_track_stack();
72262 +
72263 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
72264 if (!new)
72265 return NULL;
72266 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
72267 const struct cred *old;
72268 struct cred *new;
72269
72270 + pax_track_stack();
72271 +
72272 validate_process_creds();
72273
72274 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72275 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72276 struct thread_group_cred *tgcred = NULL;
72277 struct cred *new;
72278
72279 + pax_track_stack();
72280 +
72281 #ifdef CONFIG_KEYS
72282 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72283 if (!tgcred)
72284 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72285 struct cred *new;
72286 int ret;
72287
72288 + pax_track_stack();
72289 +
72290 mutex_init(&p->cred_guard_mutex);
72291
72292 if (
72293 @@ -523,11 +546,13 @@ error_put:
72294 * Always returns 0 thus allowing this function to be tail-called at the end
72295 * of, say, sys_setgid().
72296 */
72297 -int commit_creds(struct cred *new)
72298 +static int __commit_creds(struct cred *new)
72299 {
72300 struct task_struct *task = current;
72301 const struct cred *old = task->real_cred;
72302
72303 + pax_track_stack();
72304 +
72305 kdebug("commit_creds(%p{%d,%d})", new,
72306 atomic_read(&new->usage),
72307 read_cred_subscribers(new));
72308 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72309
72310 get_cred(new); /* we will require a ref for the subj creds too */
72311
72312 + gr_set_role_label(task, new->uid, new->gid);
72313 +
72314 /* dumpability changes */
72315 if (old->euid != new->euid ||
72316 old->egid != new->egid ||
72317 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72318 key_fsgid_changed(task);
72319
72320 /* do it
72321 - * - What if a process setreuid()'s and this brings the
72322 - * new uid over his NPROC rlimit? We can check this now
72323 - * cheaply with the new uid cache, so if it matters
72324 - * we should be checking for it. -DaveM
72325 + * RLIMIT_NPROC limits on user->processes have already been checked
72326 + * in set_user().
72327 */
72328 alter_cred_subscribers(new, 2);
72329 if (new->user != old->user)
72330 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72331 put_cred(old);
72332 return 0;
72333 }
72334 +
72335 +#ifdef CONFIG_GRKERNSEC_SETXID
72336 +extern int set_user(struct cred *new);
72337 +
72338 +void gr_delayed_cred_worker(void)
72339 +{
72340 + const struct cred *new = current->delayed_cred;
72341 + struct cred *ncred;
72342 +
72343 + current->delayed_cred = NULL;
72344 +
72345 + if (current_uid() && new != NULL) {
72346 + // from doing get_cred on it when queueing this
72347 + put_cred(new);
72348 + return;
72349 + } else if (new == NULL)
72350 + return;
72351 +
72352 + ncred = prepare_creds();
72353 + if (!ncred)
72354 + goto die;
72355 + // uids
72356 + ncred->uid = new->uid;
72357 + ncred->euid = new->euid;
72358 + ncred->suid = new->suid;
72359 + ncred->fsuid = new->fsuid;
72360 + // gids
72361 + ncred->gid = new->gid;
72362 + ncred->egid = new->egid;
72363 + ncred->sgid = new->sgid;
72364 + ncred->fsgid = new->fsgid;
72365 + // groups
72366 + if (set_groups(ncred, new->group_info) < 0) {
72367 + abort_creds(ncred);
72368 + goto die;
72369 + }
72370 + // caps
72371 + ncred->securebits = new->securebits;
72372 + ncred->cap_inheritable = new->cap_inheritable;
72373 + ncred->cap_permitted = new->cap_permitted;
72374 + ncred->cap_effective = new->cap_effective;
72375 + ncred->cap_bset = new->cap_bset;
72376 +
72377 + if (set_user(ncred)) {
72378 + abort_creds(ncred);
72379 + goto die;
72380 + }
72381 +
72382 + // from doing get_cred on it when queueing this
72383 + put_cred(new);
72384 +
72385 + __commit_creds(ncred);
72386 + return;
72387 +die:
72388 + // from doing get_cred on it when queueing this
72389 + put_cred(new);
72390 + do_group_exit(SIGKILL);
72391 +}
72392 +#endif
72393 +
72394 +int commit_creds(struct cred *new)
72395 +{
72396 +#ifdef CONFIG_GRKERNSEC_SETXID
72397 + struct task_struct *t;
72398 +
72399 + /* we won't get called with tasklist_lock held for writing
72400 + and interrupts disabled as the cred struct in that case is
72401 + init_cred
72402 + */
72403 + if (grsec_enable_setxid && !current_is_single_threaded() &&
72404 + !current_uid() && new->uid) {
72405 + rcu_read_lock();
72406 + read_lock(&tasklist_lock);
72407 + for (t = next_thread(current); t != current;
72408 + t = next_thread(t)) {
72409 + if (t->delayed_cred == NULL) {
72410 + t->delayed_cred = get_cred(new);
72411 + set_tsk_need_resched(t);
72412 + }
72413 + }
72414 + read_unlock(&tasklist_lock);
72415 + rcu_read_unlock();
72416 + }
72417 +#endif
72418 + return __commit_creds(new);
72419 +}
72420 +
72421 EXPORT_SYMBOL(commit_creds);
72422
72423 +
72424 /**
72425 * abort_creds - Discard a set of credentials and unlock the current task
72426 * @new: The credentials that were going to be applied
72427 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72428 */
72429 void abort_creds(struct cred *new)
72430 {
72431 + pax_track_stack();
72432 +
72433 kdebug("abort_creds(%p{%d,%d})", new,
72434 atomic_read(&new->usage),
72435 read_cred_subscribers(new));
72436 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
72437 {
72438 const struct cred *old = current->cred;
72439
72440 + pax_track_stack();
72441 +
72442 kdebug("override_creds(%p{%d,%d})", new,
72443 atomic_read(&new->usage),
72444 read_cred_subscribers(new));
72445 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
72446 {
72447 const struct cred *override = current->cred;
72448
72449 + pax_track_stack();
72450 +
72451 kdebug("revert_creds(%p{%d,%d})", old,
72452 atomic_read(&old->usage),
72453 read_cred_subscribers(old));
72454 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
72455 const struct cred *old;
72456 struct cred *new;
72457
72458 + pax_track_stack();
72459 +
72460 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72461 if (!new)
72462 return NULL;
72463 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
72464 */
72465 int set_security_override(struct cred *new, u32 secid)
72466 {
72467 + pax_track_stack();
72468 +
72469 return security_kernel_act_as(new, secid);
72470 }
72471 EXPORT_SYMBOL(set_security_override);
72472 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
72473 u32 secid;
72474 int ret;
72475
72476 + pax_track_stack();
72477 +
72478 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
72479 if (ret < 0)
72480 return ret;
72481 diff --git a/kernel/exit.c b/kernel/exit.c
72482 index 0f8fae3..7916abf 100644
72483 --- a/kernel/exit.c
72484 +++ b/kernel/exit.c
72485 @@ -55,6 +55,10 @@
72486 #include <asm/pgtable.h>
72487 #include <asm/mmu_context.h>
72488
72489 +#ifdef CONFIG_GRKERNSEC
72490 +extern rwlock_t grsec_exec_file_lock;
72491 +#endif
72492 +
72493 static void exit_mm(struct task_struct * tsk);
72494
72495 static void __unhash_process(struct task_struct *p)
72496 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
72497 struct task_struct *leader;
72498 int zap_leader;
72499 repeat:
72500 +#ifdef CONFIG_NET
72501 + gr_del_task_from_ip_table(p);
72502 +#endif
72503 +
72504 tracehook_prepare_release_task(p);
72505 /* don't need to get the RCU readlock here - the process is dead and
72506 * can't be modifying its own credentials */
72507 @@ -397,7 +405,7 @@ int allow_signal(int sig)
72508 * know it'll be handled, so that they don't get converted to
72509 * SIGKILL or just silently dropped.
72510 */
72511 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
72512 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
72513 recalc_sigpending();
72514 spin_unlock_irq(&current->sighand->siglock);
72515 return 0;
72516 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
72517 vsnprintf(current->comm, sizeof(current->comm), name, args);
72518 va_end(args);
72519
72520 +#ifdef CONFIG_GRKERNSEC
72521 + write_lock(&grsec_exec_file_lock);
72522 + if (current->exec_file) {
72523 + fput(current->exec_file);
72524 + current->exec_file = NULL;
72525 + }
72526 + write_unlock(&grsec_exec_file_lock);
72527 +#endif
72528 +
72529 + gr_set_kernel_label(current);
72530 +
72531 /*
72532 * If we were started as result of loading a module, close all of the
72533 * user space pages. We don't need them, and if we didn't close them
72534 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
72535 struct task_struct *tsk = current;
72536 int group_dead;
72537
72538 - profile_task_exit(tsk);
72539 -
72540 - WARN_ON(atomic_read(&tsk->fs_excl));
72541 -
72542 + /*
72543 + * Check this first since set_fs() below depends on
72544 + * current_thread_info(), which we better not access when we're in
72545 + * interrupt context. Other than that, we want to do the set_fs()
72546 + * as early as possible.
72547 + */
72548 if (unlikely(in_interrupt()))
72549 panic("Aiee, killing interrupt handler!");
72550 - if (unlikely(!tsk->pid))
72551 - panic("Attempted to kill the idle task!");
72552
72553 /*
72554 - * If do_exit is called because this processes oopsed, it's possible
72555 + * If do_exit is called because this processes Oops'ed, it's possible
72556 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
72557 * continuing. Amongst other possible reasons, this is to prevent
72558 * mm_release()->clear_child_tid() from writing to a user-controlled
72559 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
72560 */
72561 set_fs(USER_DS);
72562
72563 + profile_task_exit(tsk);
72564 +
72565 + WARN_ON(atomic_read(&tsk->fs_excl));
72566 +
72567 + if (unlikely(!tsk->pid))
72568 + panic("Attempted to kill the idle task!");
72569 +
72570 tracehook_report_exit(&code);
72571
72572 validate_creds_for_do_exit(tsk);
72573 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
72574 tsk->exit_code = code;
72575 taskstats_exit(tsk, group_dead);
72576
72577 + gr_acl_handle_psacct(tsk, code);
72578 + gr_acl_handle_exit();
72579 +
72580 exit_mm(tsk);
72581
72582 if (group_dead)
72583 @@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
72584 tsk->flags |= PF_EXITPIDONE;
72585
72586 if (tsk->io_context)
72587 - exit_io_context();
72588 + exit_io_context(tsk);
72589
72590 if (tsk->splice_pipe)
72591 __free_pipe_info(tsk->splice_pipe);
72592 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72593
72594 if (unlikely(wo->wo_flags & WNOWAIT)) {
72595 int exit_code = p->exit_code;
72596 - int why, status;
72597 + int why;
72598
72599 get_task_struct(p);
72600 read_unlock(&tasklist_lock);
72601 diff --git a/kernel/fork.c b/kernel/fork.c
72602 index 4bde56f..8976a8f 100644
72603 --- a/kernel/fork.c
72604 +++ b/kernel/fork.c
72605 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72606 *stackend = STACK_END_MAGIC; /* for overflow detection */
72607
72608 #ifdef CONFIG_CC_STACKPROTECTOR
72609 - tsk->stack_canary = get_random_int();
72610 + tsk->stack_canary = pax_get_random_long();
72611 #endif
72612
72613 /* One for us, one for whoever does the "release_task()" (usually parent) */
72614 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72615 mm->locked_vm = 0;
72616 mm->mmap = NULL;
72617 mm->mmap_cache = NULL;
72618 - mm->free_area_cache = oldmm->mmap_base;
72619 - mm->cached_hole_size = ~0UL;
72620 + mm->free_area_cache = oldmm->free_area_cache;
72621 + mm->cached_hole_size = oldmm->cached_hole_size;
72622 mm->map_count = 0;
72623 cpumask_clear(mm_cpumask(mm));
72624 mm->mm_rb = RB_ROOT;
72625 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72626 tmp->vm_flags &= ~VM_LOCKED;
72627 tmp->vm_mm = mm;
72628 tmp->vm_next = tmp->vm_prev = NULL;
72629 + tmp->vm_mirror = NULL;
72630 anon_vma_link(tmp);
72631 file = tmp->vm_file;
72632 if (file) {
72633 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72634 if (retval)
72635 goto out;
72636 }
72637 +
72638 +#ifdef CONFIG_PAX_SEGMEXEC
72639 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72640 + struct vm_area_struct *mpnt_m;
72641 +
72642 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72643 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72644 +
72645 + if (!mpnt->vm_mirror)
72646 + continue;
72647 +
72648 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72649 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72650 + mpnt->vm_mirror = mpnt_m;
72651 + } else {
72652 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72653 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72654 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72655 + mpnt->vm_mirror->vm_mirror = mpnt;
72656 + }
72657 + }
72658 + BUG_ON(mpnt_m);
72659 + }
72660 +#endif
72661 +
72662 /* a new mm has just been created */
72663 arch_dup_mmap(oldmm, mm);
72664 retval = 0;
72665 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72666 write_unlock(&fs->lock);
72667 return -EAGAIN;
72668 }
72669 - fs->users++;
72670 + atomic_inc(&fs->users);
72671 write_unlock(&fs->lock);
72672 return 0;
72673 }
72674 tsk->fs = copy_fs_struct(fs);
72675 if (!tsk->fs)
72676 return -ENOMEM;
72677 + gr_set_chroot_entries(tsk, &tsk->fs->root);
72678 return 0;
72679 }
72680
72681 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72682 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72683 #endif
72684 retval = -EAGAIN;
72685 +
72686 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72687 +
72688 if (atomic_read(&p->real_cred->user->processes) >=
72689 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72690 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72691 - p->real_cred->user != INIT_USER)
72692 + if (p->real_cred->user != INIT_USER &&
72693 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72694 goto bad_fork_free;
72695 }
72696 + current->flags &= ~PF_NPROC_EXCEEDED;
72697
72698 retval = copy_creds(p, clone_flags);
72699 if (retval < 0)
72700 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72701 goto bad_fork_free_pid;
72702 }
72703
72704 + gr_copy_label(p);
72705 +
72706 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72707 /*
72708 * Clear TID on mm_release()?
72709 @@ -1299,7 +1332,8 @@ bad_fork_free_pid:
72710 if (pid != &init_struct_pid)
72711 free_pid(pid);
72712 bad_fork_cleanup_io:
72713 - put_io_context(p->io_context);
72714 + if (p->io_context)
72715 + exit_io_context(p);
72716 bad_fork_cleanup_namespaces:
72717 exit_task_namespaces(p);
72718 bad_fork_cleanup_mm:
72719 @@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
72720 bad_fork_free:
72721 free_task(p);
72722 fork_out:
72723 + gr_log_forkfail(retval);
72724 +
72725 return ERR_PTR(retval);
72726 }
72727
72728 @@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
72729 if (clone_flags & CLONE_PARENT_SETTID)
72730 put_user(nr, parent_tidptr);
72731
72732 + gr_handle_brute_check();
72733 +
72734 if (clone_flags & CLONE_VFORK) {
72735 p->vfork_done = &vfork;
72736 init_completion(&vfork);
72737 @@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72738 return 0;
72739
72740 /* don't need lock here; in the worst case we'll do useless copy */
72741 - if (fs->users == 1)
72742 + if (atomic_read(&fs->users) == 1)
72743 return 0;
72744
72745 *new_fsp = copy_fs_struct(fs);
72746 @@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72747 fs = current->fs;
72748 write_lock(&fs->lock);
72749 current->fs = new_fs;
72750 - if (--fs->users)
72751 + gr_set_chroot_entries(current, &current->fs->root);
72752 + if (atomic_dec_return(&fs->users))
72753 new_fs = NULL;
72754 else
72755 new_fs = fs;
72756 diff --git a/kernel/futex.c b/kernel/futex.c
72757 index fb98c9f..333faec 100644
72758 --- a/kernel/futex.c
72759 +++ b/kernel/futex.c
72760 @@ -54,6 +54,7 @@
72761 #include <linux/mount.h>
72762 #include <linux/pagemap.h>
72763 #include <linux/syscalls.h>
72764 +#include <linux/ptrace.h>
72765 #include <linux/signal.h>
72766 #include <linux/module.h>
72767 #include <linux/magic.h>
72768 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72769 struct page *page;
72770 int err, ro = 0;
72771
72772 +#ifdef CONFIG_PAX_SEGMEXEC
72773 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72774 + return -EFAULT;
72775 +#endif
72776 +
72777 /*
72778 * The futex address must be "naturally" aligned.
72779 */
72780 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72781 struct futex_q q;
72782 int ret;
72783
72784 + pax_track_stack();
72785 +
72786 if (!bitset)
72787 return -EINVAL;
72788
72789 @@ -1871,7 +1879,7 @@ retry:
72790
72791 restart = &current_thread_info()->restart_block;
72792 restart->fn = futex_wait_restart;
72793 - restart->futex.uaddr = (u32 *)uaddr;
72794 + restart->futex.uaddr = uaddr;
72795 restart->futex.val = val;
72796 restart->futex.time = abs_time->tv64;
72797 restart->futex.bitset = bitset;
72798 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72799 struct futex_q q;
72800 int res, ret;
72801
72802 + pax_track_stack();
72803 +
72804 if (!bitset)
72805 return -EINVAL;
72806
72807 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72808 if (!p)
72809 goto err_unlock;
72810 ret = -EPERM;
72811 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72812 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72813 + goto err_unlock;
72814 +#endif
72815 pcred = __task_cred(p);
72816 if (cred->euid != pcred->euid &&
72817 cred->euid != pcred->uid &&
72818 @@ -2489,7 +2503,7 @@ retry:
72819 */
72820 static inline int fetch_robust_entry(struct robust_list __user **entry,
72821 struct robust_list __user * __user *head,
72822 - int *pi)
72823 + unsigned int *pi)
72824 {
72825 unsigned long uentry;
72826
72827 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72828 {
72829 u32 curval;
72830 int i;
72831 + mm_segment_t oldfs;
72832
72833 /*
72834 * This will fail and we want it. Some arch implementations do
72835 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72836 * implementation, the non functional ones will return
72837 * -ENOSYS.
72838 */
72839 + oldfs = get_fs();
72840 + set_fs(USER_DS);
72841 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72842 + set_fs(oldfs);
72843 if (curval == -EFAULT)
72844 futex_cmpxchg_enabled = 1;
72845
72846 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72847 index 2357165..eb25501 100644
72848 --- a/kernel/futex_compat.c
72849 +++ b/kernel/futex_compat.c
72850 @@ -10,6 +10,7 @@
72851 #include <linux/compat.h>
72852 #include <linux/nsproxy.h>
72853 #include <linux/futex.h>
72854 +#include <linux/ptrace.h>
72855
72856 #include <asm/uaccess.h>
72857
72858 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72859 {
72860 struct compat_robust_list_head __user *head;
72861 unsigned long ret;
72862 - const struct cred *cred = current_cred(), *pcred;
72863 + const struct cred *cred = current_cred();
72864 + const struct cred *pcred;
72865
72866 if (!futex_cmpxchg_enabled)
72867 return -ENOSYS;
72868 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72869 if (!p)
72870 goto err_unlock;
72871 ret = -EPERM;
72872 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72873 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72874 + goto err_unlock;
72875 +#endif
72876 pcred = __task_cred(p);
72877 if (cred->euid != pcred->euid &&
72878 cred->euid != pcred->uid &&
72879 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72880 index 9b22d03..6295b62 100644
72881 --- a/kernel/gcov/base.c
72882 +++ b/kernel/gcov/base.c
72883 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
72884 }
72885
72886 #ifdef CONFIG_MODULES
72887 -static inline int within(void *addr, void *start, unsigned long size)
72888 -{
72889 - return ((addr >= start) && (addr < start + size));
72890 -}
72891 -
72892 /* Update list and generate events when modules are unloaded. */
72893 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72894 void *data)
72895 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72896 prev = NULL;
72897 /* Remove entries located in module from linked list. */
72898 for (info = gcov_info_head; info; info = info->next) {
72899 - if (within(info, mod->module_core, mod->core_size)) {
72900 + if (within_module_core_rw((unsigned long)info, mod)) {
72901 if (prev)
72902 prev->next = info->next;
72903 else
72904 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72905 index a6e9d00..a0da4f9 100644
72906 --- a/kernel/hrtimer.c
72907 +++ b/kernel/hrtimer.c
72908 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72909 local_irq_restore(flags);
72910 }
72911
72912 -static void run_hrtimer_softirq(struct softirq_action *h)
72913 +static void run_hrtimer_softirq(void)
72914 {
72915 hrtimer_peek_ahead_timers();
72916 }
72917 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72918 index 8b6b8b6..6bc87df 100644
72919 --- a/kernel/kallsyms.c
72920 +++ b/kernel/kallsyms.c
72921 @@ -11,6 +11,9 @@
72922 * Changed the compression method from stem compression to "table lookup"
72923 * compression (see scripts/kallsyms.c for a more complete description)
72924 */
72925 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72926 +#define __INCLUDED_BY_HIDESYM 1
72927 +#endif
72928 #include <linux/kallsyms.h>
72929 #include <linux/module.h>
72930 #include <linux/init.h>
72931 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72932
72933 static inline int is_kernel_inittext(unsigned long addr)
72934 {
72935 + if (system_state != SYSTEM_BOOTING)
72936 + return 0;
72937 +
72938 if (addr >= (unsigned long)_sinittext
72939 && addr <= (unsigned long)_einittext)
72940 return 1;
72941 return 0;
72942 }
72943
72944 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72945 +#ifdef CONFIG_MODULES
72946 +static inline int is_module_text(unsigned long addr)
72947 +{
72948 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72949 + return 1;
72950 +
72951 + addr = ktla_ktva(addr);
72952 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72953 +}
72954 +#else
72955 +static inline int is_module_text(unsigned long addr)
72956 +{
72957 + return 0;
72958 +}
72959 +#endif
72960 +#endif
72961 +
72962 static inline int is_kernel_text(unsigned long addr)
72963 {
72964 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72965 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72966
72967 static inline int is_kernel(unsigned long addr)
72968 {
72969 +
72970 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72971 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
72972 + return 1;
72973 +
72974 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72975 +#else
72976 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72977 +#endif
72978 +
72979 return 1;
72980 return in_gate_area_no_task(addr);
72981 }
72982
72983 static int is_ksym_addr(unsigned long addr)
72984 {
72985 +
72986 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72987 + if (is_module_text(addr))
72988 + return 0;
72989 +#endif
72990 +
72991 if (all_var)
72992 return is_kernel(addr);
72993
72994 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72995
72996 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72997 {
72998 - iter->name[0] = '\0';
72999 iter->nameoff = get_symbol_offset(new_pos);
73000 iter->pos = new_pos;
73001 }
73002 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
73003 {
73004 struct kallsym_iter *iter = m->private;
73005
73006 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73007 + if (current_uid())
73008 + return 0;
73009 +#endif
73010 +
73011 /* Some debugging symbols have no name. Ignore them. */
73012 if (!iter->name[0])
73013 return 0;
73014 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
73015 struct kallsym_iter *iter;
73016 int ret;
73017
73018 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
73019 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
73020 if (!iter)
73021 return -ENOMEM;
73022 reset_iter(iter, 0);
73023 diff --git a/kernel/kexec.c b/kernel/kexec.c
73024 index f336e21..9c1c20b 100644
73025 --- a/kernel/kexec.c
73026 +++ b/kernel/kexec.c
73027 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
73028 unsigned long flags)
73029 {
73030 struct compat_kexec_segment in;
73031 - struct kexec_segment out, __user *ksegments;
73032 + struct kexec_segment out;
73033 + struct kexec_segment __user *ksegments;
73034 unsigned long i, result;
73035
73036 /* Don't allow clients that don't understand the native
73037 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
73038 index 53dae4b..9ba3743 100644
73039 --- a/kernel/kgdb.c
73040 +++ b/kernel/kgdb.c
73041 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
73042 /* Guard for recursive entry */
73043 static int exception_level;
73044
73045 -static struct kgdb_io *kgdb_io_ops;
73046 +static const struct kgdb_io *kgdb_io_ops;
73047 static DEFINE_SPINLOCK(kgdb_registration_lock);
73048
73049 /* kgdb console driver is loaded */
73050 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
73051 */
73052 static atomic_t passive_cpu_wait[NR_CPUS];
73053 static atomic_t cpu_in_kgdb[NR_CPUS];
73054 -atomic_t kgdb_setting_breakpoint;
73055 +atomic_unchecked_t kgdb_setting_breakpoint;
73056
73057 struct task_struct *kgdb_usethread;
73058 struct task_struct *kgdb_contthread;
73059 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
73060 sizeof(unsigned long)];
73061
73062 /* to keep track of the CPU which is doing the single stepping*/
73063 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73064 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73065
73066 /*
73067 * If you are debugging a problem where roundup (the collection of
73068 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
73069 return 0;
73070 if (kgdb_connected)
73071 return 1;
73072 - if (atomic_read(&kgdb_setting_breakpoint))
73073 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
73074 return 1;
73075 if (print_wait)
73076 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
73077 @@ -1426,8 +1426,8 @@ acquirelock:
73078 * instance of the exception handler wanted to come into the
73079 * debugger on a different CPU via a single step
73080 */
73081 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
73082 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
73083 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
73084 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
73085
73086 atomic_set(&kgdb_active, -1);
73087 touch_softlockup_watchdog();
73088 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
73089 *
73090 * Register it with the KGDB core.
73091 */
73092 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
73093 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
73094 {
73095 int err;
73096
73097 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
73098 *
73099 * Unregister it with the KGDB core.
73100 */
73101 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
73102 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
73103 {
73104 BUG_ON(kgdb_connected);
73105
73106 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
73107 */
73108 void kgdb_breakpoint(void)
73109 {
73110 - atomic_set(&kgdb_setting_breakpoint, 1);
73111 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
73112 wmb(); /* Sync point before breakpoint */
73113 arch_kgdb_breakpoint();
73114 wmb(); /* Sync point after breakpoint */
73115 - atomic_set(&kgdb_setting_breakpoint, 0);
73116 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
73117 }
73118 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
73119
73120 diff --git a/kernel/kmod.c b/kernel/kmod.c
73121 index a061472..40884b6 100644
73122 --- a/kernel/kmod.c
73123 +++ b/kernel/kmod.c
73124 @@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
73125 * If module auto-loading support is disabled then this function
73126 * becomes a no-operation.
73127 */
73128 -int __request_module(bool wait, const char *fmt, ...)
73129 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
73130 {
73131 - va_list args;
73132 char module_name[MODULE_NAME_LEN];
73133 unsigned int max_modprobes;
73134 int ret;
73135 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
73136 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
73137 static char *envp[] = { "HOME=/",
73138 "TERM=linux",
73139 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
73140 @@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
73141 if (ret)
73142 return ret;
73143
73144 - va_start(args, fmt);
73145 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
73146 - va_end(args);
73147 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
73148 if (ret >= MODULE_NAME_LEN)
73149 return -ENAMETOOLONG;
73150
73151 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73152 + if (!current_uid()) {
73153 + /* hack to workaround consolekit/udisks stupidity */
73154 + read_lock(&tasklist_lock);
73155 + if (!strcmp(current->comm, "mount") &&
73156 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
73157 + read_unlock(&tasklist_lock);
73158 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
73159 + return -EPERM;
73160 + }
73161 + read_unlock(&tasklist_lock);
73162 + }
73163 +#endif
73164 +
73165 /* If modprobe needs a service that is in a module, we get a recursive
73166 * loop. Limit the number of running kmod threads to max_threads/2 or
73167 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
73168 @@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
73169 atomic_dec(&kmod_concurrent);
73170 return ret;
73171 }
73172 +
73173 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
73174 +{
73175 + va_list args;
73176 + int ret;
73177 +
73178 + va_start(args, fmt);
73179 + ret = ____request_module(wait, module_param, fmt, args);
73180 + va_end(args);
73181 +
73182 + return ret;
73183 +}
73184 +
73185 +int __request_module(bool wait, const char *fmt, ...)
73186 +{
73187 + va_list args;
73188 + int ret;
73189 +
73190 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73191 + if (current_uid()) {
73192 + char module_param[MODULE_NAME_LEN];
73193 +
73194 + memset(module_param, 0, sizeof(module_param));
73195 +
73196 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
73197 +
73198 + va_start(args, fmt);
73199 + ret = ____request_module(wait, module_param, fmt, args);
73200 + va_end(args);
73201 +
73202 + return ret;
73203 + }
73204 +#endif
73205 +
73206 + va_start(args, fmt);
73207 + ret = ____request_module(wait, NULL, fmt, args);
73208 + va_end(args);
73209 +
73210 + return ret;
73211 +}
73212 +
73213 +
73214 EXPORT_SYMBOL(__request_module);
73215 #endif /* CONFIG_MODULES */
73216
73217 @@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
73218 *
73219 * Thus the __user pointer cast is valid here.
73220 */
73221 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
73222 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
73223
73224 /*
73225 * If ret is 0, either ____call_usermodehelper failed and the
73226 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
73227 index 176d825..77fa8ea 100644
73228 --- a/kernel/kprobes.c
73229 +++ b/kernel/kprobes.c
73230 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
73231 * kernel image and loaded module images reside. This is required
73232 * so x86_64 can correctly handle the %rip-relative fixups.
73233 */
73234 - kip->insns = module_alloc(PAGE_SIZE);
73235 + kip->insns = module_alloc_exec(PAGE_SIZE);
73236 if (!kip->insns) {
73237 kfree(kip);
73238 return NULL;
73239 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
73240 */
73241 if (!list_is_singular(&kprobe_insn_pages)) {
73242 list_del(&kip->list);
73243 - module_free(NULL, kip->insns);
73244 + module_free_exec(NULL, kip->insns);
73245 kfree(kip);
73246 }
73247 return 1;
73248 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
73249 {
73250 int i, err = 0;
73251 unsigned long offset = 0, size = 0;
73252 - char *modname, namebuf[128];
73253 + char *modname, namebuf[KSYM_NAME_LEN];
73254 const char *symbol_name;
73255 void *addr;
73256 struct kprobe_blackpoint *kb;
73257 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
73258 const char *sym = NULL;
73259 unsigned int i = *(loff_t *) v;
73260 unsigned long offset = 0;
73261 - char *modname, namebuf[128];
73262 + char *modname, namebuf[KSYM_NAME_LEN];
73263
73264 head = &kprobe_table[i];
73265 preempt_disable();
73266 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
73267 index d86fe89..d12fc66 100644
73268 --- a/kernel/lockdep.c
73269 +++ b/kernel/lockdep.c
73270 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
73271 /*
73272 * Various lockdep statistics:
73273 */
73274 -atomic_t chain_lookup_hits;
73275 -atomic_t chain_lookup_misses;
73276 -atomic_t hardirqs_on_events;
73277 -atomic_t hardirqs_off_events;
73278 -atomic_t redundant_hardirqs_on;
73279 -atomic_t redundant_hardirqs_off;
73280 -atomic_t softirqs_on_events;
73281 -atomic_t softirqs_off_events;
73282 -atomic_t redundant_softirqs_on;
73283 -atomic_t redundant_softirqs_off;
73284 -atomic_t nr_unused_locks;
73285 -atomic_t nr_cyclic_checks;
73286 -atomic_t nr_find_usage_forwards_checks;
73287 -atomic_t nr_find_usage_backwards_checks;
73288 +atomic_unchecked_t chain_lookup_hits;
73289 +atomic_unchecked_t chain_lookup_misses;
73290 +atomic_unchecked_t hardirqs_on_events;
73291 +atomic_unchecked_t hardirqs_off_events;
73292 +atomic_unchecked_t redundant_hardirqs_on;
73293 +atomic_unchecked_t redundant_hardirqs_off;
73294 +atomic_unchecked_t softirqs_on_events;
73295 +atomic_unchecked_t softirqs_off_events;
73296 +atomic_unchecked_t redundant_softirqs_on;
73297 +atomic_unchecked_t redundant_softirqs_off;
73298 +atomic_unchecked_t nr_unused_locks;
73299 +atomic_unchecked_t nr_cyclic_checks;
73300 +atomic_unchecked_t nr_find_usage_forwards_checks;
73301 +atomic_unchecked_t nr_find_usage_backwards_checks;
73302 #endif
73303
73304 /*
73305 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
73306 int i;
73307 #endif
73308
73309 +#ifdef CONFIG_PAX_KERNEXEC
73310 + start = ktla_ktva(start);
73311 +#endif
73312 +
73313 /*
73314 * static variable?
73315 */
73316 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
73317 */
73318 for_each_possible_cpu(i) {
73319 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73320 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73321 - + per_cpu_offset(i);
73322 + end = start + PERCPU_ENOUGH_ROOM;
73323
73324 if ((addr >= start) && (addr < end))
73325 return 1;
73326 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73327 if (!static_obj(lock->key)) {
73328 debug_locks_off();
73329 printk("INFO: trying to register non-static key.\n");
73330 + printk("lock:%pS key:%pS.\n", lock, lock->key);
73331 printk("the code is fine but needs lockdep annotation.\n");
73332 printk("turning off the locking correctness validator.\n");
73333 dump_stack();
73334 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73335 if (!class)
73336 return 0;
73337 }
73338 - debug_atomic_inc((atomic_t *)&class->ops);
73339 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73340 if (very_verbose(class)) {
73341 printk("\nacquire class [%p] %s", class->key, class->name);
73342 if (class->name_version > 1)
73343 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73344 index a2ee95a..092f0f2 100644
73345 --- a/kernel/lockdep_internals.h
73346 +++ b/kernel/lockdep_internals.h
73347 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73348 /*
73349 * Various lockdep statistics:
73350 */
73351 -extern atomic_t chain_lookup_hits;
73352 -extern atomic_t chain_lookup_misses;
73353 -extern atomic_t hardirqs_on_events;
73354 -extern atomic_t hardirqs_off_events;
73355 -extern atomic_t redundant_hardirqs_on;
73356 -extern atomic_t redundant_hardirqs_off;
73357 -extern atomic_t softirqs_on_events;
73358 -extern atomic_t softirqs_off_events;
73359 -extern atomic_t redundant_softirqs_on;
73360 -extern atomic_t redundant_softirqs_off;
73361 -extern atomic_t nr_unused_locks;
73362 -extern atomic_t nr_cyclic_checks;
73363 -extern atomic_t nr_cyclic_check_recursions;
73364 -extern atomic_t nr_find_usage_forwards_checks;
73365 -extern atomic_t nr_find_usage_forwards_recursions;
73366 -extern atomic_t nr_find_usage_backwards_checks;
73367 -extern atomic_t nr_find_usage_backwards_recursions;
73368 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
73369 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
73370 -# define debug_atomic_read(ptr) atomic_read(ptr)
73371 +extern atomic_unchecked_t chain_lookup_hits;
73372 +extern atomic_unchecked_t chain_lookup_misses;
73373 +extern atomic_unchecked_t hardirqs_on_events;
73374 +extern atomic_unchecked_t hardirqs_off_events;
73375 +extern atomic_unchecked_t redundant_hardirqs_on;
73376 +extern atomic_unchecked_t redundant_hardirqs_off;
73377 +extern atomic_unchecked_t softirqs_on_events;
73378 +extern atomic_unchecked_t softirqs_off_events;
73379 +extern atomic_unchecked_t redundant_softirqs_on;
73380 +extern atomic_unchecked_t redundant_softirqs_off;
73381 +extern atomic_unchecked_t nr_unused_locks;
73382 +extern atomic_unchecked_t nr_cyclic_checks;
73383 +extern atomic_unchecked_t nr_cyclic_check_recursions;
73384 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
73385 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73386 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
73387 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73388 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73389 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73390 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73391 #else
73392 # define debug_atomic_inc(ptr) do { } while (0)
73393 # define debug_atomic_dec(ptr) do { } while (0)
73394 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73395 index d4aba4f..02a353f 100644
73396 --- a/kernel/lockdep_proc.c
73397 +++ b/kernel/lockdep_proc.c
73398 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73399
73400 static void print_name(struct seq_file *m, struct lock_class *class)
73401 {
73402 - char str[128];
73403 + char str[KSYM_NAME_LEN];
73404 const char *name = class->name;
73405
73406 if (!name) {
73407 diff --git a/kernel/module.c b/kernel/module.c
73408 index 4b270e6..2226274 100644
73409 --- a/kernel/module.c
73410 +++ b/kernel/module.c
73411 @@ -55,6 +55,7 @@
73412 #include <linux/async.h>
73413 #include <linux/percpu.h>
73414 #include <linux/kmemleak.h>
73415 +#include <linux/grsecurity.h>
73416
73417 #define CREATE_TRACE_POINTS
73418 #include <trace/events/module.h>
73419 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73420 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73421
73422 /* Bounds of module allocation, for speeding __module_address */
73423 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73424 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73425 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73426
73427 int register_module_notifier(struct notifier_block * nb)
73428 {
73429 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73430 return true;
73431
73432 list_for_each_entry_rcu(mod, &modules, list) {
73433 - struct symsearch arr[] = {
73434 + struct symsearch modarr[] = {
73435 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
73436 NOT_GPL_ONLY, false },
73437 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
73438 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73439 #endif
73440 };
73441
73442 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
73443 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
73444 return true;
73445 }
73446 return false;
73447 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
73448 void *ptr;
73449 int cpu;
73450
73451 - if (align > PAGE_SIZE) {
73452 + if (align-1 >= PAGE_SIZE) {
73453 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
73454 name, align, PAGE_SIZE);
73455 align = PAGE_SIZE;
73456 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
73457 * /sys/module/foo/sections stuff
73458 * J. Corbet <corbet@lwn.net>
73459 */
73460 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
73461 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73462
73463 static inline bool sect_empty(const Elf_Shdr *sect)
73464 {
73465 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
73466 destroy_params(mod->kp, mod->num_kp);
73467
73468 /* This may be NULL, but that's OK */
73469 - module_free(mod, mod->module_init);
73470 + module_free(mod, mod->module_init_rw);
73471 + module_free_exec(mod, mod->module_init_rx);
73472 kfree(mod->args);
73473 if (mod->percpu)
73474 percpu_modfree(mod->percpu);
73475 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
73476 percpu_modfree(mod->refptr);
73477 #endif
73478 /* Free lock-classes: */
73479 - lockdep_free_key_range(mod->module_core, mod->core_size);
73480 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
73481 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
73482
73483 /* Finally, free the core (containing the module structure) */
73484 - module_free(mod, mod->module_core);
73485 + module_free_exec(mod, mod->module_core_rx);
73486 + module_free(mod, mod->module_core_rw);
73487
73488 #ifdef CONFIG_MPU
73489 update_protections(current->mm);
73490 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73491 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73492 int ret = 0;
73493 const struct kernel_symbol *ksym;
73494 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73495 + int is_fs_load = 0;
73496 + int register_filesystem_found = 0;
73497 + char *p;
73498 +
73499 + p = strstr(mod->args, "grsec_modharden_fs");
73500 +
73501 + if (p) {
73502 + char *endptr = p + strlen("grsec_modharden_fs");
73503 + /* copy \0 as well */
73504 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
73505 + is_fs_load = 1;
73506 + }
73507 +#endif
73508 +
73509
73510 for (i = 1; i < n; i++) {
73511 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73512 + const char *name = strtab + sym[i].st_name;
73513 +
73514 + /* it's a real shame this will never get ripped and copied
73515 + upstream! ;(
73516 + */
73517 + if (is_fs_load && !strcmp(name, "register_filesystem"))
73518 + register_filesystem_found = 1;
73519 +#endif
73520 switch (sym[i].st_shndx) {
73521 case SHN_COMMON:
73522 /* We compiled with -fno-common. These are not
73523 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73524 strtab + sym[i].st_name, mod);
73525 /* Ok if resolved. */
73526 if (ksym) {
73527 + pax_open_kernel();
73528 sym[i].st_value = ksym->value;
73529 + pax_close_kernel();
73530 break;
73531 }
73532
73533 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73534 secbase = (unsigned long)mod->percpu;
73535 else
73536 secbase = sechdrs[sym[i].st_shndx].sh_addr;
73537 + pax_open_kernel();
73538 sym[i].st_value += secbase;
73539 + pax_close_kernel();
73540 break;
73541 }
73542 }
73543
73544 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73545 + if (is_fs_load && !register_filesystem_found) {
73546 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
73547 + ret = -EPERM;
73548 + }
73549 +#endif
73550 +
73551 return ret;
73552 }
73553
73554 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
73555 || s->sh_entsize != ~0UL
73556 || strstarts(secstrings + s->sh_name, ".init"))
73557 continue;
73558 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
73559 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73560 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
73561 + else
73562 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
73563 DEBUGP("\t%s\n", secstrings + s->sh_name);
73564 }
73565 - if (m == 0)
73566 - mod->core_text_size = mod->core_size;
73567 }
73568
73569 DEBUGP("Init section allocation order:\n");
73570 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
73571 || s->sh_entsize != ~0UL
73572 || !strstarts(secstrings + s->sh_name, ".init"))
73573 continue;
73574 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
73575 - | INIT_OFFSET_MASK);
73576 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73577 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
73578 + else
73579 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
73580 + s->sh_entsize |= INIT_OFFSET_MASK;
73581 DEBUGP("\t%s\n", secstrings + s->sh_name);
73582 }
73583 - if (m == 0)
73584 - mod->init_text_size = mod->init_size;
73585 }
73586 }
73587
73588 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
73589
73590 /* As per nm */
73591 static char elf_type(const Elf_Sym *sym,
73592 - Elf_Shdr *sechdrs,
73593 - const char *secstrings,
73594 - struct module *mod)
73595 + const Elf_Shdr *sechdrs,
73596 + const char *secstrings)
73597 {
73598 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73599 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73600 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73601
73602 /* Put symbol section at end of init part of module. */
73603 symsect->sh_flags |= SHF_ALLOC;
73604 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73605 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73606 symindex) | INIT_OFFSET_MASK;
73607 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73608
73609 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73610 }
73611
73612 /* Append room for core symbols at end of core part. */
73613 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73614 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73615 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73616 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73617
73618 /* Put string table section at end of init part of module. */
73619 strsect->sh_flags |= SHF_ALLOC;
73620 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73621 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73622 strindex) | INIT_OFFSET_MASK;
73623 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73624
73625 /* Append room for core symbols' strings at end of core part. */
73626 - *pstroffs = mod->core_size;
73627 + *pstroffs = mod->core_size_rx;
73628 __set_bit(0, strmap);
73629 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73630 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73631
73632 return symoffs;
73633 }
73634 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73635 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73636 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73637
73638 + pax_open_kernel();
73639 +
73640 /* Set types up while we still have access to sections. */
73641 for (i = 0; i < mod->num_symtab; i++)
73642 mod->symtab[i].st_info
73643 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73644 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
73645
73646 - mod->core_symtab = dst = mod->module_core + symoffs;
73647 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
73648 src = mod->symtab;
73649 *dst = *src;
73650 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73651 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73652 }
73653 mod->core_num_syms = ndst;
73654
73655 - mod->core_strtab = s = mod->module_core + stroffs;
73656 + mod->core_strtab = s = mod->module_core_rx + stroffs;
73657 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73658 if (test_bit(i, strmap))
73659 *++s = mod->strtab[i];
73660 +
73661 + pax_close_kernel();
73662 }
73663 #else
73664 static inline unsigned long layout_symtab(struct module *mod,
73665 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73666 #endif
73667 }
73668
73669 -static void *module_alloc_update_bounds(unsigned long size)
73670 +static void *module_alloc_update_bounds_rw(unsigned long size)
73671 {
73672 void *ret = module_alloc(size);
73673
73674 if (ret) {
73675 /* Update module bounds. */
73676 - if ((unsigned long)ret < module_addr_min)
73677 - module_addr_min = (unsigned long)ret;
73678 - if ((unsigned long)ret + size > module_addr_max)
73679 - module_addr_max = (unsigned long)ret + size;
73680 + if ((unsigned long)ret < module_addr_min_rw)
73681 + module_addr_min_rw = (unsigned long)ret;
73682 + if ((unsigned long)ret + size > module_addr_max_rw)
73683 + module_addr_max_rw = (unsigned long)ret + size;
73684 + }
73685 + return ret;
73686 +}
73687 +
73688 +static void *module_alloc_update_bounds_rx(unsigned long size)
73689 +{
73690 + void *ret = module_alloc_exec(size);
73691 +
73692 + if (ret) {
73693 + /* Update module bounds. */
73694 + if ((unsigned long)ret < module_addr_min_rx)
73695 + module_addr_min_rx = (unsigned long)ret;
73696 + if ((unsigned long)ret + size > module_addr_max_rx)
73697 + module_addr_max_rx = (unsigned long)ret + size;
73698 }
73699 return ret;
73700 }
73701 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73702 unsigned int i;
73703
73704 /* only scan the sections containing data */
73705 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73706 - (unsigned long)mod->module_core,
73707 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73708 + (unsigned long)mod->module_core_rw,
73709 sizeof(struct module), GFP_KERNEL);
73710
73711 for (i = 1; i < hdr->e_shnum; i++) {
73712 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73713 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73714 continue;
73715
73716 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73717 - (unsigned long)mod->module_core,
73718 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73719 + (unsigned long)mod->module_core_rw,
73720 sechdrs[i].sh_size, GFP_KERNEL);
73721 }
73722 }
73723 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73724 Elf_Ehdr *hdr;
73725 Elf_Shdr *sechdrs;
73726 char *secstrings, *args, *modmagic, *strtab = NULL;
73727 - char *staging;
73728 + char *staging, *license;
73729 unsigned int i;
73730 unsigned int symindex = 0;
73731 unsigned int strindex = 0;
73732 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73733 goto free_hdr;
73734 }
73735
73736 + license = get_modinfo(sechdrs, infoindex, "license");
73737 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73738 + if (!license || !license_is_gpl_compatible(license)) {
73739 + err = -ENOEXEC;
73740 + goto free_hdr;
73741 + }
73742 +#endif
73743 +
73744 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73745 /* This is allowed: modprobe --force will invalidate it. */
73746 if (!modmagic) {
73747 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73748 secstrings, &stroffs, strmap);
73749
73750 /* Do the allocs. */
73751 - ptr = module_alloc_update_bounds(mod->core_size);
73752 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73753 /*
73754 * The pointer to this block is stored in the module structure
73755 * which is inside the block. Just mark it as not being a
73756 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73757 err = -ENOMEM;
73758 goto free_percpu;
73759 }
73760 - memset(ptr, 0, mod->core_size);
73761 - mod->module_core = ptr;
73762 + memset(ptr, 0, mod->core_size_rw);
73763 + mod->module_core_rw = ptr;
73764
73765 - ptr = module_alloc_update_bounds(mod->init_size);
73766 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73767 /*
73768 * The pointer to this block is stored in the module structure
73769 * which is inside the block. This block doesn't need to be
73770 * scanned as it contains data and code that will be freed
73771 * after the module is initialized.
73772 */
73773 - kmemleak_ignore(ptr);
73774 - if (!ptr && mod->init_size) {
73775 + kmemleak_not_leak(ptr);
73776 + if (!ptr && mod->init_size_rw) {
73777 err = -ENOMEM;
73778 - goto free_core;
73779 + goto free_core_rw;
73780 }
73781 - memset(ptr, 0, mod->init_size);
73782 - mod->module_init = ptr;
73783 + memset(ptr, 0, mod->init_size_rw);
73784 + mod->module_init_rw = ptr;
73785 +
73786 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73787 + kmemleak_not_leak(ptr);
73788 + if (!ptr) {
73789 + err = -ENOMEM;
73790 + goto free_init_rw;
73791 + }
73792 +
73793 + pax_open_kernel();
73794 + memset(ptr, 0, mod->core_size_rx);
73795 + pax_close_kernel();
73796 + mod->module_core_rx = ptr;
73797 +
73798 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73799 + kmemleak_not_leak(ptr);
73800 + if (!ptr && mod->init_size_rx) {
73801 + err = -ENOMEM;
73802 + goto free_core_rx;
73803 + }
73804 +
73805 + pax_open_kernel();
73806 + memset(ptr, 0, mod->init_size_rx);
73807 + pax_close_kernel();
73808 + mod->module_init_rx = ptr;
73809
73810 /* Transfer each section which specifies SHF_ALLOC */
73811 DEBUGP("final section addresses:\n");
73812 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73813 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73814 continue;
73815
73816 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73817 - dest = mod->module_init
73818 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73819 - else
73820 - dest = mod->module_core + sechdrs[i].sh_entsize;
73821 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73822 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73823 + dest = mod->module_init_rw
73824 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73825 + else
73826 + dest = mod->module_init_rx
73827 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73828 + } else {
73829 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73830 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73831 + else
73832 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73833 + }
73834
73835 - if (sechdrs[i].sh_type != SHT_NOBITS)
73836 - memcpy(dest, (void *)sechdrs[i].sh_addr,
73837 - sechdrs[i].sh_size);
73838 + if (sechdrs[i].sh_type != SHT_NOBITS) {
73839 +
73840 +#ifdef CONFIG_PAX_KERNEXEC
73841 +#ifdef CONFIG_X86_64
73842 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73843 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73844 +#endif
73845 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73846 + pax_open_kernel();
73847 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73848 + pax_close_kernel();
73849 + } else
73850 +#endif
73851 +
73852 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73853 + }
73854 /* Update sh_addr to point to copy in image. */
73855 - sechdrs[i].sh_addr = (unsigned long)dest;
73856 +
73857 +#ifdef CONFIG_PAX_KERNEXEC
73858 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73859 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73860 + else
73861 +#endif
73862 +
73863 + sechdrs[i].sh_addr = (unsigned long)dest;
73864 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73865 }
73866 /* Module has been moved. */
73867 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73868 mod->name);
73869 if (!mod->refptr) {
73870 err = -ENOMEM;
73871 - goto free_init;
73872 + goto free_init_rx;
73873 }
73874 #endif
73875 /* Now we've moved module, initialize linked lists, etc. */
73876 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73877 goto free_unload;
73878
73879 /* Set up license info based on the info section */
73880 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73881 + set_license(mod, license);
73882
73883 /*
73884 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73885 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73886 /* Set up MODINFO_ATTR fields */
73887 setup_modinfo(mod, sechdrs, infoindex);
73888
73889 + mod->args = args;
73890 +
73891 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73892 + {
73893 + char *p, *p2;
73894 +
73895 + if (strstr(mod->args, "grsec_modharden_netdev")) {
73896 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73897 + err = -EPERM;
73898 + goto cleanup;
73899 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73900 + p += strlen("grsec_modharden_normal");
73901 + p2 = strstr(p, "_");
73902 + if (p2) {
73903 + *p2 = '\0';
73904 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73905 + *p2 = '_';
73906 + }
73907 + err = -EPERM;
73908 + goto cleanup;
73909 + }
73910 + }
73911 +#endif
73912 +
73913 +
73914 /* Fix up syms, so that st_value is a pointer to location. */
73915 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73916 mod);
73917 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73918
73919 /* Now do relocations. */
73920 for (i = 1; i < hdr->e_shnum; i++) {
73921 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
73922 unsigned int info = sechdrs[i].sh_info;
73923 + strtab = (char *)sechdrs[strindex].sh_addr;
73924
73925 /* Not a valid relocation section? */
73926 if (info >= hdr->e_shnum)
73927 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73928 * Do it before processing of module parameters, so the module
73929 * can provide parameter accessor functions of its own.
73930 */
73931 - if (mod->module_init)
73932 - flush_icache_range((unsigned long)mod->module_init,
73933 - (unsigned long)mod->module_init
73934 - + mod->init_size);
73935 - flush_icache_range((unsigned long)mod->module_core,
73936 - (unsigned long)mod->module_core + mod->core_size);
73937 + if (mod->module_init_rx)
73938 + flush_icache_range((unsigned long)mod->module_init_rx,
73939 + (unsigned long)mod->module_init_rx
73940 + + mod->init_size_rx);
73941 + flush_icache_range((unsigned long)mod->module_core_rx,
73942 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
73943
73944 set_fs(old_fs);
73945
73946 - mod->args = args;
73947 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73948 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73949 mod->name);
73950 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73951 free_unload:
73952 module_unload_free(mod);
73953 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73954 + free_init_rx:
73955 percpu_modfree(mod->refptr);
73956 - free_init:
73957 #endif
73958 - module_free(mod, mod->module_init);
73959 - free_core:
73960 - module_free(mod, mod->module_core);
73961 + module_free_exec(mod, mod->module_init_rx);
73962 + free_core_rx:
73963 + module_free_exec(mod, mod->module_core_rx);
73964 + free_init_rw:
73965 + module_free(mod, mod->module_init_rw);
73966 + free_core_rw:
73967 + module_free(mod, mod->module_core_rw);
73968 /* mod will be freed with core. Don't access it beyond this line! */
73969 free_percpu:
73970 if (percpu)
73971 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73972 mod->symtab = mod->core_symtab;
73973 mod->strtab = mod->core_strtab;
73974 #endif
73975 - module_free(mod, mod->module_init);
73976 - mod->module_init = NULL;
73977 - mod->init_size = 0;
73978 - mod->init_text_size = 0;
73979 + module_free(mod, mod->module_init_rw);
73980 + module_free_exec(mod, mod->module_init_rx);
73981 + mod->module_init_rw = NULL;
73982 + mod->module_init_rx = NULL;
73983 + mod->init_size_rw = 0;
73984 + mod->init_size_rx = 0;
73985 mutex_unlock(&module_mutex);
73986
73987 return 0;
73988 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73989 unsigned long nextval;
73990
73991 /* At worse, next value is at end of module */
73992 - if (within_module_init(addr, mod))
73993 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
73994 + if (within_module_init_rx(addr, mod))
73995 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73996 + else if (within_module_init_rw(addr, mod))
73997 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73998 + else if (within_module_core_rx(addr, mod))
73999 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
74000 + else if (within_module_core_rw(addr, mod))
74001 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
74002 else
74003 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
74004 + return NULL;
74005
74006 /* Scan for closest preceeding symbol, and next symbol. (ELF
74007 starts real symbols at 1). */
74008 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
74009 char buf[8];
74010
74011 seq_printf(m, "%s %u",
74012 - mod->name, mod->init_size + mod->core_size);
74013 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
74014 print_unload_info(m, mod);
74015
74016 /* Informative for users. */
74017 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
74018 mod->state == MODULE_STATE_COMING ? "Loading":
74019 "Live");
74020 /* Used by oprofile and other similar tools. */
74021 - seq_printf(m, " 0x%p", mod->module_core);
74022 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74023
74024 /* Taints info */
74025 if (mod->taints)
74026 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
74027
74028 static int __init proc_modules_init(void)
74029 {
74030 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74031 +#ifdef CONFIG_GRKERNSEC_PROC_USER
74032 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
74033 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74034 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
74035 +#else
74036 proc_create("modules", 0, NULL, &proc_modules_operations);
74037 +#endif
74038 +#else
74039 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
74040 +#endif
74041 return 0;
74042 }
74043 module_init(proc_modules_init);
74044 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
74045 {
74046 struct module *mod;
74047
74048 - if (addr < module_addr_min || addr > module_addr_max)
74049 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
74050 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
74051 return NULL;
74052
74053 list_for_each_entry_rcu(mod, &modules, list)
74054 - if (within_module_core(addr, mod)
74055 - || within_module_init(addr, mod))
74056 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
74057 return mod;
74058 return NULL;
74059 }
74060 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
74061 */
74062 struct module *__module_text_address(unsigned long addr)
74063 {
74064 - struct module *mod = __module_address(addr);
74065 + struct module *mod;
74066 +
74067 +#ifdef CONFIG_X86_32
74068 + addr = ktla_ktva(addr);
74069 +#endif
74070 +
74071 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
74072 + return NULL;
74073 +
74074 + mod = __module_address(addr);
74075 +
74076 if (mod) {
74077 /* Make sure it's within the text section. */
74078 - if (!within(addr, mod->module_init, mod->init_text_size)
74079 - && !within(addr, mod->module_core, mod->core_text_size))
74080 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
74081 mod = NULL;
74082 }
74083 return mod;
74084 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
74085 index ec815a9..fe46e99 100644
74086 --- a/kernel/mutex-debug.c
74087 +++ b/kernel/mutex-debug.c
74088 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
74089 }
74090
74091 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74092 - struct thread_info *ti)
74093 + struct task_struct *task)
74094 {
74095 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
74096
74097 /* Mark the current thread as blocked on the lock: */
74098 - ti->task->blocked_on = waiter;
74099 + task->blocked_on = waiter;
74100 }
74101
74102 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74103 - struct thread_info *ti)
74104 + struct task_struct *task)
74105 {
74106 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
74107 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
74108 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
74109 - ti->task->blocked_on = NULL;
74110 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
74111 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
74112 + task->blocked_on = NULL;
74113
74114 list_del_init(&waiter->list);
74115 waiter->task = NULL;
74116 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
74117 return;
74118
74119 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
74120 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
74121 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
74122 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
74123 mutex_clear_owner(lock);
74124 }
74125 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
74126 index 6b2d735..372d3c4 100644
74127 --- a/kernel/mutex-debug.h
74128 +++ b/kernel/mutex-debug.h
74129 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
74130 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
74131 extern void debug_mutex_add_waiter(struct mutex *lock,
74132 struct mutex_waiter *waiter,
74133 - struct thread_info *ti);
74134 + struct task_struct *task);
74135 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74136 - struct thread_info *ti);
74137 + struct task_struct *task);
74138 extern void debug_mutex_unlock(struct mutex *lock);
74139 extern void debug_mutex_init(struct mutex *lock, const char *name,
74140 struct lock_class_key *key);
74141
74142 static inline void mutex_set_owner(struct mutex *lock)
74143 {
74144 - lock->owner = current_thread_info();
74145 + lock->owner = current;
74146 }
74147
74148 static inline void mutex_clear_owner(struct mutex *lock)
74149 diff --git a/kernel/mutex.c b/kernel/mutex.c
74150 index f85644c..5ee9f77 100644
74151 --- a/kernel/mutex.c
74152 +++ b/kernel/mutex.c
74153 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74154 */
74155
74156 for (;;) {
74157 - struct thread_info *owner;
74158 + struct task_struct *owner;
74159
74160 /*
74161 * If we own the BKL, then don't spin. The owner of
74162 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74163 spin_lock_mutex(&lock->wait_lock, flags);
74164
74165 debug_mutex_lock_common(lock, &waiter);
74166 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
74167 + debug_mutex_add_waiter(lock, &waiter, task);
74168
74169 /* add waiting tasks to the end of the waitqueue (FIFO): */
74170 list_add_tail(&waiter.list, &lock->wait_list);
74171 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74172 * TASK_UNINTERRUPTIBLE case.)
74173 */
74174 if (unlikely(signal_pending_state(state, task))) {
74175 - mutex_remove_waiter(lock, &waiter,
74176 - task_thread_info(task));
74177 + mutex_remove_waiter(lock, &waiter, task);
74178 mutex_release(&lock->dep_map, 1, ip);
74179 spin_unlock_mutex(&lock->wait_lock, flags);
74180
74181 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74182 done:
74183 lock_acquired(&lock->dep_map, ip);
74184 /* got the lock - rejoice! */
74185 - mutex_remove_waiter(lock, &waiter, current_thread_info());
74186 + mutex_remove_waiter(lock, &waiter, task);
74187 mutex_set_owner(lock);
74188
74189 /* set it to 0 if there are no waiters left: */
74190 diff --git a/kernel/mutex.h b/kernel/mutex.h
74191 index 67578ca..4115fbf 100644
74192 --- a/kernel/mutex.h
74193 +++ b/kernel/mutex.h
74194 @@ -19,7 +19,7 @@
74195 #ifdef CONFIG_SMP
74196 static inline void mutex_set_owner(struct mutex *lock)
74197 {
74198 - lock->owner = current_thread_info();
74199 + lock->owner = current;
74200 }
74201
74202 static inline void mutex_clear_owner(struct mutex *lock)
74203 diff --git a/kernel/panic.c b/kernel/panic.c
74204 index 96b45d0..ff70a46 100644
74205 --- a/kernel/panic.c
74206 +++ b/kernel/panic.c
74207 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
74208 va_end(args);
74209 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
74210 #ifdef CONFIG_DEBUG_BUGVERBOSE
74211 - dump_stack();
74212 + /*
74213 + * Avoid nested stack-dumping if a panic occurs during oops processing
74214 + */
74215 + if (!oops_in_progress)
74216 + dump_stack();
74217 #endif
74218
74219 /*
74220 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
74221 const char *board;
74222
74223 printk(KERN_WARNING "------------[ cut here ]------------\n");
74224 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
74225 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
74226 board = dmi_get_system_info(DMI_PRODUCT_NAME);
74227 if (board)
74228 printk(KERN_WARNING "Hardware name: %s\n", board);
74229 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
74230 */
74231 void __stack_chk_fail(void)
74232 {
74233 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
74234 + dump_stack();
74235 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
74236 __builtin_return_address(0));
74237 }
74238 EXPORT_SYMBOL(__stack_chk_fail);
74239 diff --git a/kernel/params.c b/kernel/params.c
74240 index d656c27..21e452c 100644
74241 --- a/kernel/params.c
74242 +++ b/kernel/params.c
74243 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
74244 return ret;
74245 }
74246
74247 -static struct sysfs_ops module_sysfs_ops = {
74248 +static const struct sysfs_ops module_sysfs_ops = {
74249 .show = module_attr_show,
74250 .store = module_attr_store,
74251 };
74252 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
74253 return 0;
74254 }
74255
74256 -static struct kset_uevent_ops module_uevent_ops = {
74257 +static const struct kset_uevent_ops module_uevent_ops = {
74258 .filter = uevent_filter,
74259 };
74260
74261 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
74262 index 37ebc14..9c121d9 100644
74263 --- a/kernel/perf_event.c
74264 +++ b/kernel/perf_event.c
74265 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
74266 */
74267 int sysctl_perf_event_sample_rate __read_mostly = 100000;
74268
74269 -static atomic64_t perf_event_id;
74270 +static atomic64_unchecked_t perf_event_id;
74271
74272 /*
74273 * Lock for (sysadmin-configurable) event reservations:
74274 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
74275 * In order to keep per-task stats reliable we need to flip the event
74276 * values when we flip the contexts.
74277 */
74278 - value = atomic64_read(&next_event->count);
74279 - value = atomic64_xchg(&event->count, value);
74280 - atomic64_set(&next_event->count, value);
74281 + value = atomic64_read_unchecked(&next_event->count);
74282 + value = atomic64_xchg_unchecked(&event->count, value);
74283 + atomic64_set_unchecked(&next_event->count, value);
74284
74285 swap(event->total_time_enabled, next_event->total_time_enabled);
74286 swap(event->total_time_running, next_event->total_time_running);
74287 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
74288 update_event_times(event);
74289 }
74290
74291 - return atomic64_read(&event->count);
74292 + return atomic64_read_unchecked(&event->count);
74293 }
74294
74295 /*
74296 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74297 values[n++] = 1 + leader->nr_siblings;
74298 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74299 values[n++] = leader->total_time_enabled +
74300 - atomic64_read(&leader->child_total_time_enabled);
74301 + atomic64_read_unchecked(&leader->child_total_time_enabled);
74302 }
74303 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74304 values[n++] = leader->total_time_running +
74305 - atomic64_read(&leader->child_total_time_running);
74306 + atomic64_read_unchecked(&leader->child_total_time_running);
74307 }
74308
74309 size = n * sizeof(u64);
74310 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74311 values[n++] = perf_event_read_value(event);
74312 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74313 values[n++] = event->total_time_enabled +
74314 - atomic64_read(&event->child_total_time_enabled);
74315 + atomic64_read_unchecked(&event->child_total_time_enabled);
74316 }
74317 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74318 values[n++] = event->total_time_running +
74319 - atomic64_read(&event->child_total_time_running);
74320 + atomic64_read_unchecked(&event->child_total_time_running);
74321 }
74322 if (read_format & PERF_FORMAT_ID)
74323 values[n++] = primary_event_id(event);
74324 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74325 static void perf_event_reset(struct perf_event *event)
74326 {
74327 (void)perf_event_read(event);
74328 - atomic64_set(&event->count, 0);
74329 + atomic64_set_unchecked(&event->count, 0);
74330 perf_event_update_userpage(event);
74331 }
74332
74333 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74334 ++userpg->lock;
74335 barrier();
74336 userpg->index = perf_event_index(event);
74337 - userpg->offset = atomic64_read(&event->count);
74338 + userpg->offset = atomic64_read_unchecked(&event->count);
74339 if (event->state == PERF_EVENT_STATE_ACTIVE)
74340 - userpg->offset -= atomic64_read(&event->hw.prev_count);
74341 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74342
74343 userpg->time_enabled = event->total_time_enabled +
74344 - atomic64_read(&event->child_total_time_enabled);
74345 + atomic64_read_unchecked(&event->child_total_time_enabled);
74346
74347 userpg->time_running = event->total_time_running +
74348 - atomic64_read(&event->child_total_time_running);
74349 + atomic64_read_unchecked(&event->child_total_time_running);
74350
74351 barrier();
74352 ++userpg->lock;
74353 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74354 u64 values[4];
74355 int n = 0;
74356
74357 - values[n++] = atomic64_read(&event->count);
74358 + values[n++] = atomic64_read_unchecked(&event->count);
74359 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74360 values[n++] = event->total_time_enabled +
74361 - atomic64_read(&event->child_total_time_enabled);
74362 + atomic64_read_unchecked(&event->child_total_time_enabled);
74363 }
74364 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74365 values[n++] = event->total_time_running +
74366 - atomic64_read(&event->child_total_time_running);
74367 + atomic64_read_unchecked(&event->child_total_time_running);
74368 }
74369 if (read_format & PERF_FORMAT_ID)
74370 values[n++] = primary_event_id(event);
74371 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74372 if (leader != event)
74373 leader->pmu->read(leader);
74374
74375 - values[n++] = atomic64_read(&leader->count);
74376 + values[n++] = atomic64_read_unchecked(&leader->count);
74377 if (read_format & PERF_FORMAT_ID)
74378 values[n++] = primary_event_id(leader);
74379
74380 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74381 if (sub != event)
74382 sub->pmu->read(sub);
74383
74384 - values[n++] = atomic64_read(&sub->count);
74385 + values[n++] = atomic64_read_unchecked(&sub->count);
74386 if (read_format & PERF_FORMAT_ID)
74387 values[n++] = primary_event_id(sub);
74388
74389 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74390 * need to add enough zero bytes after the string to handle
74391 * the 64bit alignment we do later.
74392 */
74393 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74394 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
74395 if (!buf) {
74396 name = strncpy(tmp, "//enomem", sizeof(tmp));
74397 goto got_name;
74398 }
74399 - name = d_path(&file->f_path, buf, PATH_MAX);
74400 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74401 if (IS_ERR(name)) {
74402 name = strncpy(tmp, "//toolong", sizeof(tmp));
74403 goto got_name;
74404 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74405 {
74406 struct hw_perf_event *hwc = &event->hw;
74407
74408 - atomic64_add(nr, &event->count);
74409 + atomic64_add_unchecked(nr, &event->count);
74410
74411 if (!hwc->sample_period)
74412 return;
74413 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74414 u64 now;
74415
74416 now = cpu_clock(cpu);
74417 - prev = atomic64_read(&event->hw.prev_count);
74418 - atomic64_set(&event->hw.prev_count, now);
74419 - atomic64_add(now - prev, &event->count);
74420 + prev = atomic64_read_unchecked(&event->hw.prev_count);
74421 + atomic64_set_unchecked(&event->hw.prev_count, now);
74422 + atomic64_add_unchecked(now - prev, &event->count);
74423 }
74424
74425 static int cpu_clock_perf_event_enable(struct perf_event *event)
74426 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74427 struct hw_perf_event *hwc = &event->hw;
74428 int cpu = raw_smp_processor_id();
74429
74430 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
74431 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
74432 perf_swevent_start_hrtimer(event);
74433
74434 return 0;
74435 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
74436 u64 prev;
74437 s64 delta;
74438
74439 - prev = atomic64_xchg(&event->hw.prev_count, now);
74440 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
74441 delta = now - prev;
74442 - atomic64_add(delta, &event->count);
74443 + atomic64_add_unchecked(delta, &event->count);
74444 }
74445
74446 static int task_clock_perf_event_enable(struct perf_event *event)
74447 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
74448
74449 now = event->ctx->time;
74450
74451 - atomic64_set(&hwc->prev_count, now);
74452 + atomic64_set_unchecked(&hwc->prev_count, now);
74453
74454 perf_swevent_start_hrtimer(event);
74455
74456 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
74457 event->parent = parent_event;
74458
74459 event->ns = get_pid_ns(current->nsproxy->pid_ns);
74460 - event->id = atomic64_inc_return(&perf_event_id);
74461 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
74462
74463 event->state = PERF_EVENT_STATE_INACTIVE;
74464
74465 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
74466 if (child_event->attr.inherit_stat)
74467 perf_event_read_event(child_event, child);
74468
74469 - child_val = atomic64_read(&child_event->count);
74470 + child_val = atomic64_read_unchecked(&child_event->count);
74471
74472 /*
74473 * Add back the child's count to the parent's count:
74474 */
74475 - atomic64_add(child_val, &parent_event->count);
74476 - atomic64_add(child_event->total_time_enabled,
74477 + atomic64_add_unchecked(child_val, &parent_event->count);
74478 + atomic64_add_unchecked(child_event->total_time_enabled,
74479 &parent_event->child_total_time_enabled);
74480 - atomic64_add(child_event->total_time_running,
74481 + atomic64_add_unchecked(child_event->total_time_running,
74482 &parent_event->child_total_time_running);
74483
74484 /*
74485 diff --git a/kernel/pid.c b/kernel/pid.c
74486 index fce7198..4f23a7e 100644
74487 --- a/kernel/pid.c
74488 +++ b/kernel/pid.c
74489 @@ -33,6 +33,7 @@
74490 #include <linux/rculist.h>
74491 #include <linux/bootmem.h>
74492 #include <linux/hash.h>
74493 +#include <linux/security.h>
74494 #include <linux/pid_namespace.h>
74495 #include <linux/init_task.h>
74496 #include <linux/syscalls.h>
74497 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
74498
74499 int pid_max = PID_MAX_DEFAULT;
74500
74501 -#define RESERVED_PIDS 300
74502 +#define RESERVED_PIDS 500
74503
74504 int pid_max_min = RESERVED_PIDS + 1;
74505 int pid_max_max = PID_MAX_LIMIT;
74506 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
74507 */
74508 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
74509 {
74510 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74511 + struct task_struct *task;
74512 +
74513 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74514 +
74515 + if (gr_pid_is_chrooted(task))
74516 + return NULL;
74517 +
74518 + return task;
74519 }
74520
74521 struct task_struct *find_task_by_vpid(pid_t vnr)
74522 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
74523 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
74524 }
74525
74526 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
74527 +{
74528 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
74529 +}
74530 +
74531 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
74532 {
74533 struct pid *pid;
74534 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
74535 index 5c9dc22..d271117 100644
74536 --- a/kernel/posix-cpu-timers.c
74537 +++ b/kernel/posix-cpu-timers.c
74538 @@ -6,6 +6,7 @@
74539 #include <linux/posix-timers.h>
74540 #include <linux/errno.h>
74541 #include <linux/math64.h>
74542 +#include <linux/security.h>
74543 #include <asm/uaccess.h>
74544 #include <linux/kernel_stat.h>
74545 #include <trace/events/timer.h>
74546 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
74547
74548 static __init int init_posix_cpu_timers(void)
74549 {
74550 - struct k_clock process = {
74551 + static struct k_clock process = {
74552 .clock_getres = process_cpu_clock_getres,
74553 .clock_get = process_cpu_clock_get,
74554 .clock_set = do_posix_clock_nosettime,
74555 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
74556 .nsleep = process_cpu_nsleep,
74557 .nsleep_restart = process_cpu_nsleep_restart,
74558 };
74559 - struct k_clock thread = {
74560 + static struct k_clock thread = {
74561 .clock_getres = thread_cpu_clock_getres,
74562 .clock_get = thread_cpu_clock_get,
74563 .clock_set = do_posix_clock_nosettime,
74564 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
74565 index 5e76d22..cf1baeb 100644
74566 --- a/kernel/posix-timers.c
74567 +++ b/kernel/posix-timers.c
74568 @@ -42,6 +42,7 @@
74569 #include <linux/compiler.h>
74570 #include <linux/idr.h>
74571 #include <linux/posix-timers.h>
74572 +#include <linux/grsecurity.h>
74573 #include <linux/syscalls.h>
74574 #include <linux/wait.h>
74575 #include <linux/workqueue.h>
74576 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
74577 * which we beg off on and pass to do_sys_settimeofday().
74578 */
74579
74580 -static struct k_clock posix_clocks[MAX_CLOCKS];
74581 +static struct k_clock *posix_clocks[MAX_CLOCKS];
74582
74583 /*
74584 * These ones are defined below.
74585 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
74586 */
74587 #define CLOCK_DISPATCH(clock, call, arglist) \
74588 ((clock) < 0 ? posix_cpu_##call arglist : \
74589 - (posix_clocks[clock].call != NULL \
74590 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
74591 + (posix_clocks[clock]->call != NULL \
74592 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
74593
74594 /*
74595 * Default clock hook functions when the struct k_clock passed
74596 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74597 struct timespec *tp)
74598 {
74599 tp->tv_sec = 0;
74600 - tp->tv_nsec = posix_clocks[which_clock].res;
74601 + tp->tv_nsec = posix_clocks[which_clock]->res;
74602 return 0;
74603 }
74604
74605 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74606 return 0;
74607 if ((unsigned) which_clock >= MAX_CLOCKS)
74608 return 1;
74609 - if (posix_clocks[which_clock].clock_getres != NULL)
74610 + if (posix_clocks[which_clock] == NULL)
74611 return 0;
74612 - if (posix_clocks[which_clock].res != 0)
74613 + if (posix_clocks[which_clock]->clock_getres != NULL)
74614 + return 0;
74615 + if (posix_clocks[which_clock]->res != 0)
74616 return 0;
74617 return 1;
74618 }
74619 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74620 */
74621 static __init int init_posix_timers(void)
74622 {
74623 - struct k_clock clock_realtime = {
74624 + static struct k_clock clock_realtime = {
74625 .clock_getres = hrtimer_get_res,
74626 };
74627 - struct k_clock clock_monotonic = {
74628 + static struct k_clock clock_monotonic = {
74629 .clock_getres = hrtimer_get_res,
74630 .clock_get = posix_ktime_get_ts,
74631 .clock_set = do_posix_clock_nosettime,
74632 };
74633 - struct k_clock clock_monotonic_raw = {
74634 + static struct k_clock clock_monotonic_raw = {
74635 .clock_getres = hrtimer_get_res,
74636 .clock_get = posix_get_monotonic_raw,
74637 .clock_set = do_posix_clock_nosettime,
74638 .timer_create = no_timer_create,
74639 .nsleep = no_nsleep,
74640 };
74641 - struct k_clock clock_realtime_coarse = {
74642 + static struct k_clock clock_realtime_coarse = {
74643 .clock_getres = posix_get_coarse_res,
74644 .clock_get = posix_get_realtime_coarse,
74645 .clock_set = do_posix_clock_nosettime,
74646 .timer_create = no_timer_create,
74647 .nsleep = no_nsleep,
74648 };
74649 - struct k_clock clock_monotonic_coarse = {
74650 + static struct k_clock clock_monotonic_coarse = {
74651 .clock_getres = posix_get_coarse_res,
74652 .clock_get = posix_get_monotonic_coarse,
74653 .clock_set = do_posix_clock_nosettime,
74654 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74655 .nsleep = no_nsleep,
74656 };
74657
74658 + pax_track_stack();
74659 +
74660 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74661 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74662 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74663 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74664 return;
74665 }
74666
74667 - posix_clocks[clock_id] = *new_clock;
74668 + posix_clocks[clock_id] = new_clock;
74669 }
74670 EXPORT_SYMBOL_GPL(register_posix_clock);
74671
74672 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74673 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74674 return -EFAULT;
74675
74676 + /* only the CLOCK_REALTIME clock can be set, all other clocks
74677 + have their clock_set fptr set to a nosettime dummy function
74678 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74679 + call common_clock_set, which calls do_sys_settimeofday, which
74680 + we hook
74681 + */
74682 +
74683 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74684 }
74685
74686 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74687 index 04a9e90..bc355aa 100644
74688 --- a/kernel/power/hibernate.c
74689 +++ b/kernel/power/hibernate.c
74690 @@ -48,14 +48,14 @@ enum {
74691
74692 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74693
74694 -static struct platform_hibernation_ops *hibernation_ops;
74695 +static const struct platform_hibernation_ops *hibernation_ops;
74696
74697 /**
74698 * hibernation_set_ops - set the global hibernate operations
74699 * @ops: the hibernation operations to use in subsequent hibernation transitions
74700 */
74701
74702 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
74703 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74704 {
74705 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74706 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74707 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74708 index e8b3370..484c2e4 100644
74709 --- a/kernel/power/poweroff.c
74710 +++ b/kernel/power/poweroff.c
74711 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74712 .enable_mask = SYSRQ_ENABLE_BOOT,
74713 };
74714
74715 -static int pm_sysrq_init(void)
74716 +static int __init pm_sysrq_init(void)
74717 {
74718 register_sysrq_key('o', &sysrq_poweroff_op);
74719 return 0;
74720 diff --git a/kernel/power/process.c b/kernel/power/process.c
74721 index e7cd671..56d5f459 100644
74722 --- a/kernel/power/process.c
74723 +++ b/kernel/power/process.c
74724 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74725 struct timeval start, end;
74726 u64 elapsed_csecs64;
74727 unsigned int elapsed_csecs;
74728 + bool timedout = false;
74729
74730 do_gettimeofday(&start);
74731
74732 end_time = jiffies + TIMEOUT;
74733 do {
74734 todo = 0;
74735 + if (time_after(jiffies, end_time))
74736 + timedout = true;
74737 read_lock(&tasklist_lock);
74738 do_each_thread(g, p) {
74739 if (frozen(p) || !freezeable(p))
74740 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74741 * It is "frozen enough". If the task does wake
74742 * up, it will immediately call try_to_freeze.
74743 */
74744 - if (!task_is_stopped_or_traced(p) &&
74745 - !freezer_should_skip(p))
74746 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74747 todo++;
74748 + if (timedout) {
74749 + printk(KERN_ERR "Task refusing to freeze:\n");
74750 + sched_show_task(p);
74751 + }
74752 + }
74753 } while_each_thread(g, p);
74754 read_unlock(&tasklist_lock);
74755 yield(); /* Yield is okay here */
74756 - if (time_after(jiffies, end_time))
74757 - break;
74758 - } while (todo);
74759 + } while (todo && !timedout);
74760
74761 do_gettimeofday(&end);
74762 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74763 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74764 index 40dd021..fb30ceb 100644
74765 --- a/kernel/power/suspend.c
74766 +++ b/kernel/power/suspend.c
74767 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74768 [PM_SUSPEND_MEM] = "mem",
74769 };
74770
74771 -static struct platform_suspend_ops *suspend_ops;
74772 +static const struct platform_suspend_ops *suspend_ops;
74773
74774 /**
74775 * suspend_set_ops - Set the global suspend method table.
74776 * @ops: Pointer to ops structure.
74777 */
74778 -void suspend_set_ops(struct platform_suspend_ops *ops)
74779 +void suspend_set_ops(const struct platform_suspend_ops *ops)
74780 {
74781 mutex_lock(&pm_mutex);
74782 suspend_ops = ops;
74783 diff --git a/kernel/printk.c b/kernel/printk.c
74784 index 4cade47..4d17900 100644
74785 --- a/kernel/printk.c
74786 +++ b/kernel/printk.c
74787 @@ -33,6 +33,7 @@
74788 #include <linux/bootmem.h>
74789 #include <linux/syscalls.h>
74790 #include <linux/kexec.h>
74791 +#include <linux/syslog.h>
74792
74793 #include <asm/uaccess.h>
74794
74795 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74796 }
74797 #endif
74798
74799 -/*
74800 - * Commands to do_syslog:
74801 - *
74802 - * 0 -- Close the log. Currently a NOP.
74803 - * 1 -- Open the log. Currently a NOP.
74804 - * 2 -- Read from the log.
74805 - * 3 -- Read all messages remaining in the ring buffer.
74806 - * 4 -- Read and clear all messages remaining in the ring buffer
74807 - * 5 -- Clear ring buffer.
74808 - * 6 -- Disable printk's to console
74809 - * 7 -- Enable printk's to console
74810 - * 8 -- Set level of messages printed to console
74811 - * 9 -- Return number of unread characters in the log buffer
74812 - * 10 -- Return size of the log buffer
74813 - */
74814 -int do_syslog(int type, char __user *buf, int len)
74815 +int do_syslog(int type, char __user *buf, int len, bool from_file)
74816 {
74817 unsigned i, j, limit, count;
74818 int do_clear = 0;
74819 char c;
74820 int error = 0;
74821
74822 - error = security_syslog(type);
74823 +#ifdef CONFIG_GRKERNSEC_DMESG
74824 + if (grsec_enable_dmesg &&
74825 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74826 + !capable(CAP_SYS_ADMIN))
74827 + return -EPERM;
74828 +#endif
74829 +
74830 + error = security_syslog(type, from_file);
74831 if (error)
74832 return error;
74833
74834 switch (type) {
74835 - case 0: /* Close log */
74836 + case SYSLOG_ACTION_CLOSE: /* Close log */
74837 break;
74838 - case 1: /* Open log */
74839 + case SYSLOG_ACTION_OPEN: /* Open log */
74840 break;
74841 - case 2: /* Read from log */
74842 + case SYSLOG_ACTION_READ: /* Read from log */
74843 error = -EINVAL;
74844 if (!buf || len < 0)
74845 goto out;
74846 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74847 if (!error)
74848 error = i;
74849 break;
74850 - case 4: /* Read/clear last kernel messages */
74851 + /* Read/clear last kernel messages */
74852 + case SYSLOG_ACTION_READ_CLEAR:
74853 do_clear = 1;
74854 /* FALL THRU */
74855 - case 3: /* Read last kernel messages */
74856 + /* Read last kernel messages */
74857 + case SYSLOG_ACTION_READ_ALL:
74858 error = -EINVAL;
74859 if (!buf || len < 0)
74860 goto out;
74861 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74862 }
74863 }
74864 break;
74865 - case 5: /* Clear ring buffer */
74866 + /* Clear ring buffer */
74867 + case SYSLOG_ACTION_CLEAR:
74868 logged_chars = 0;
74869 break;
74870 - case 6: /* Disable logging to console */
74871 + /* Disable logging to console */
74872 + case SYSLOG_ACTION_CONSOLE_OFF:
74873 if (saved_console_loglevel == -1)
74874 saved_console_loglevel = console_loglevel;
74875 console_loglevel = minimum_console_loglevel;
74876 break;
74877 - case 7: /* Enable logging to console */
74878 + /* Enable logging to console */
74879 + case SYSLOG_ACTION_CONSOLE_ON:
74880 if (saved_console_loglevel != -1) {
74881 console_loglevel = saved_console_loglevel;
74882 saved_console_loglevel = -1;
74883 }
74884 break;
74885 - case 8: /* Set level of messages printed to console */
74886 + /* Set level of messages printed to console */
74887 + case SYSLOG_ACTION_CONSOLE_LEVEL:
74888 error = -EINVAL;
74889 if (len < 1 || len > 8)
74890 goto out;
74891 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74892 saved_console_loglevel = -1;
74893 error = 0;
74894 break;
74895 - case 9: /* Number of chars in the log buffer */
74896 + /* Number of chars in the log buffer */
74897 + case SYSLOG_ACTION_SIZE_UNREAD:
74898 error = log_end - log_start;
74899 break;
74900 - case 10: /* Size of the log buffer */
74901 + /* Size of the log buffer */
74902 + case SYSLOG_ACTION_SIZE_BUFFER:
74903 error = log_buf_len;
74904 break;
74905 default:
74906 @@ -415,7 +416,7 @@ out:
74907
74908 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74909 {
74910 - return do_syslog(type, buf, len);
74911 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74912 }
74913
74914 /*
74915 diff --git a/kernel/profile.c b/kernel/profile.c
74916 index dfadc5b..7f59404 100644
74917 --- a/kernel/profile.c
74918 +++ b/kernel/profile.c
74919 @@ -39,7 +39,7 @@ struct profile_hit {
74920 /* Oprofile timer tick hook */
74921 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74922
74923 -static atomic_t *prof_buffer;
74924 +static atomic_unchecked_t *prof_buffer;
74925 static unsigned long prof_len, prof_shift;
74926
74927 int prof_on __read_mostly;
74928 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74929 hits[i].pc = 0;
74930 continue;
74931 }
74932 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74933 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74934 hits[i].hits = hits[i].pc = 0;
74935 }
74936 }
74937 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74938 * Add the current hit(s) and flush the write-queue out
74939 * to the global buffer:
74940 */
74941 - atomic_add(nr_hits, &prof_buffer[pc]);
74942 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74943 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74944 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74945 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74946 hits[i].pc = hits[i].hits = 0;
74947 }
74948 out:
74949 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74950 if (prof_on != type || !prof_buffer)
74951 return;
74952 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74953 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74954 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74955 }
74956 #endif /* !CONFIG_SMP */
74957 EXPORT_SYMBOL_GPL(profile_hits);
74958 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74959 return -EFAULT;
74960 buf++; p++; count--; read++;
74961 }
74962 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74963 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74964 if (copy_to_user(buf, (void *)pnt, count))
74965 return -EFAULT;
74966 read += count;
74967 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74968 }
74969 #endif
74970 profile_discard_flip_buffers();
74971 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74972 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74973 return count;
74974 }
74975
74976 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74977 index 05625f6..733bf70 100644
74978 --- a/kernel/ptrace.c
74979 +++ b/kernel/ptrace.c
74980 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74981 return ret;
74982 }
74983
74984 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74985 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74986 + unsigned int log)
74987 {
74988 const struct cred *cred = current_cred(), *tcred;
74989
74990 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74991 cred->gid != tcred->egid ||
74992 cred->gid != tcred->sgid ||
74993 cred->gid != tcred->gid) &&
74994 - !capable(CAP_SYS_PTRACE)) {
74995 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74996 + (log && !capable(CAP_SYS_PTRACE)))
74997 + ) {
74998 rcu_read_unlock();
74999 return -EPERM;
75000 }
75001 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
75002 smp_rmb();
75003 if (task->mm)
75004 dumpable = get_dumpable(task->mm);
75005 - if (!dumpable && !capable(CAP_SYS_PTRACE))
75006 + if (!dumpable &&
75007 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
75008 + (log && !capable(CAP_SYS_PTRACE))))
75009 return -EPERM;
75010
75011 return security_ptrace_access_check(task, mode);
75012 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
75013 {
75014 int err;
75015 task_lock(task);
75016 - err = __ptrace_may_access(task, mode);
75017 + err = __ptrace_may_access(task, mode, 0);
75018 + task_unlock(task);
75019 + return !err;
75020 +}
75021 +
75022 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
75023 +{
75024 + int err;
75025 + task_lock(task);
75026 + err = __ptrace_may_access(task, mode, 1);
75027 task_unlock(task);
75028 return !err;
75029 }
75030 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
75031 goto out;
75032
75033 task_lock(task);
75034 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
75035 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
75036 task_unlock(task);
75037 if (retval)
75038 goto unlock_creds;
75039 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
75040 goto unlock_tasklist;
75041
75042 task->ptrace = PT_PTRACED;
75043 - if (capable(CAP_SYS_PTRACE))
75044 + if (capable_nolog(CAP_SYS_PTRACE))
75045 task->ptrace |= PT_PTRACE_CAP;
75046
75047 __ptrace_link(task, current);
75048 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
75049 {
75050 int copied = 0;
75051
75052 + pax_track_stack();
75053 +
75054 while (len > 0) {
75055 char buf[128];
75056 int this_len, retval;
75057 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
75058 {
75059 int copied = 0;
75060
75061 + pax_track_stack();
75062 +
75063 while (len > 0) {
75064 char buf[128];
75065 int this_len, retval;
75066 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
75067 int ret = -EIO;
75068 siginfo_t siginfo;
75069
75070 + pax_track_stack();
75071 +
75072 switch (request) {
75073 case PTRACE_PEEKTEXT:
75074 case PTRACE_PEEKDATA:
75075 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
75076 ret = ptrace_setoptions(child, data);
75077 break;
75078 case PTRACE_GETEVENTMSG:
75079 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
75080 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
75081 break;
75082
75083 case PTRACE_GETSIGINFO:
75084 ret = ptrace_getsiginfo(child, &siginfo);
75085 if (!ret)
75086 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
75087 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
75088 &siginfo);
75089 break;
75090
75091 case PTRACE_SETSIGINFO:
75092 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
75093 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
75094 sizeof siginfo))
75095 ret = -EFAULT;
75096 else
75097 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
75098 goto out;
75099 }
75100
75101 + if (gr_handle_ptrace(child, request)) {
75102 + ret = -EPERM;
75103 + goto out_put_task_struct;
75104 + }
75105 +
75106 if (request == PTRACE_ATTACH) {
75107 ret = ptrace_attach(child);
75108 /*
75109 * Some architectures need to do book-keeping after
75110 * a ptrace attach.
75111 */
75112 - if (!ret)
75113 + if (!ret) {
75114 arch_ptrace_attach(child);
75115 + gr_audit_ptrace(child);
75116 + }
75117 goto out_put_task_struct;
75118 }
75119
75120 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
75121 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
75122 if (copied != sizeof(tmp))
75123 return -EIO;
75124 - return put_user(tmp, (unsigned long __user *)data);
75125 + return put_user(tmp, (__force unsigned long __user *)data);
75126 }
75127
75128 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
75129 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
75130 siginfo_t siginfo;
75131 int ret;
75132
75133 + pax_track_stack();
75134 +
75135 switch (request) {
75136 case PTRACE_PEEKTEXT:
75137 case PTRACE_PEEKDATA:
75138 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
75139 goto out;
75140 }
75141
75142 + if (gr_handle_ptrace(child, request)) {
75143 + ret = -EPERM;
75144 + goto out_put_task_struct;
75145 + }
75146 +
75147 if (request == PTRACE_ATTACH) {
75148 ret = ptrace_attach(child);
75149 /*
75150 * Some architectures need to do book-keeping after
75151 * a ptrace attach.
75152 */
75153 - if (!ret)
75154 + if (!ret) {
75155 arch_ptrace_attach(child);
75156 + gr_audit_ptrace(child);
75157 + }
75158 goto out_put_task_struct;
75159 }
75160
75161 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
75162 index 697c0a0..2402696 100644
75163 --- a/kernel/rcutorture.c
75164 +++ b/kernel/rcutorture.c
75165 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
75166 { 0 };
75167 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
75168 { 0 };
75169 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75170 -static atomic_t n_rcu_torture_alloc;
75171 -static atomic_t n_rcu_torture_alloc_fail;
75172 -static atomic_t n_rcu_torture_free;
75173 -static atomic_t n_rcu_torture_mberror;
75174 -static atomic_t n_rcu_torture_error;
75175 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75176 +static atomic_unchecked_t n_rcu_torture_alloc;
75177 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
75178 +static atomic_unchecked_t n_rcu_torture_free;
75179 +static atomic_unchecked_t n_rcu_torture_mberror;
75180 +static atomic_unchecked_t n_rcu_torture_error;
75181 static long n_rcu_torture_timers;
75182 static struct list_head rcu_torture_removed;
75183 static cpumask_var_t shuffle_tmp_mask;
75184 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
75185
75186 spin_lock_bh(&rcu_torture_lock);
75187 if (list_empty(&rcu_torture_freelist)) {
75188 - atomic_inc(&n_rcu_torture_alloc_fail);
75189 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
75190 spin_unlock_bh(&rcu_torture_lock);
75191 return NULL;
75192 }
75193 - atomic_inc(&n_rcu_torture_alloc);
75194 + atomic_inc_unchecked(&n_rcu_torture_alloc);
75195 p = rcu_torture_freelist.next;
75196 list_del_init(p);
75197 spin_unlock_bh(&rcu_torture_lock);
75198 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
75199 static void
75200 rcu_torture_free(struct rcu_torture *p)
75201 {
75202 - atomic_inc(&n_rcu_torture_free);
75203 + atomic_inc_unchecked(&n_rcu_torture_free);
75204 spin_lock_bh(&rcu_torture_lock);
75205 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
75206 spin_unlock_bh(&rcu_torture_lock);
75207 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
75208 i = rp->rtort_pipe_count;
75209 if (i > RCU_TORTURE_PIPE_LEN)
75210 i = RCU_TORTURE_PIPE_LEN;
75211 - atomic_inc(&rcu_torture_wcount[i]);
75212 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75213 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75214 rp->rtort_mbtest = 0;
75215 rcu_torture_free(rp);
75216 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
75217 i = rp->rtort_pipe_count;
75218 if (i > RCU_TORTURE_PIPE_LEN)
75219 i = RCU_TORTURE_PIPE_LEN;
75220 - atomic_inc(&rcu_torture_wcount[i]);
75221 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75222 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75223 rp->rtort_mbtest = 0;
75224 list_del(&rp->rtort_free);
75225 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
75226 i = old_rp->rtort_pipe_count;
75227 if (i > RCU_TORTURE_PIPE_LEN)
75228 i = RCU_TORTURE_PIPE_LEN;
75229 - atomic_inc(&rcu_torture_wcount[i]);
75230 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75231 old_rp->rtort_pipe_count++;
75232 cur_ops->deferred_free(old_rp);
75233 }
75234 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
75235 return;
75236 }
75237 if (p->rtort_mbtest == 0)
75238 - atomic_inc(&n_rcu_torture_mberror);
75239 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75240 spin_lock(&rand_lock);
75241 cur_ops->read_delay(&rand);
75242 n_rcu_torture_timers++;
75243 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
75244 continue;
75245 }
75246 if (p->rtort_mbtest == 0)
75247 - atomic_inc(&n_rcu_torture_mberror);
75248 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75249 cur_ops->read_delay(&rand);
75250 preempt_disable();
75251 pipe_count = p->rtort_pipe_count;
75252 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
75253 rcu_torture_current,
75254 rcu_torture_current_version,
75255 list_empty(&rcu_torture_freelist),
75256 - atomic_read(&n_rcu_torture_alloc),
75257 - atomic_read(&n_rcu_torture_alloc_fail),
75258 - atomic_read(&n_rcu_torture_free),
75259 - atomic_read(&n_rcu_torture_mberror),
75260 + atomic_read_unchecked(&n_rcu_torture_alloc),
75261 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75262 + atomic_read_unchecked(&n_rcu_torture_free),
75263 + atomic_read_unchecked(&n_rcu_torture_mberror),
75264 n_rcu_torture_timers);
75265 - if (atomic_read(&n_rcu_torture_mberror) != 0)
75266 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
75267 cnt += sprintf(&page[cnt], " !!!");
75268 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75269 if (i > 1) {
75270 cnt += sprintf(&page[cnt], "!!! ");
75271 - atomic_inc(&n_rcu_torture_error);
75272 + atomic_inc_unchecked(&n_rcu_torture_error);
75273 WARN_ON_ONCE(1);
75274 }
75275 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75276 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
75277 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75278 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75279 cnt += sprintf(&page[cnt], " %d",
75280 - atomic_read(&rcu_torture_wcount[i]));
75281 + atomic_read_unchecked(&rcu_torture_wcount[i]));
75282 }
75283 cnt += sprintf(&page[cnt], "\n");
75284 if (cur_ops->stats)
75285 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
75286
75287 if (cur_ops->cleanup)
75288 cur_ops->cleanup();
75289 - if (atomic_read(&n_rcu_torture_error))
75290 + if (atomic_read_unchecked(&n_rcu_torture_error))
75291 rcu_torture_print_module_parms("End of test: FAILURE");
75292 else
75293 rcu_torture_print_module_parms("End of test: SUCCESS");
75294 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75295
75296 rcu_torture_current = NULL;
75297 rcu_torture_current_version = 0;
75298 - atomic_set(&n_rcu_torture_alloc, 0);
75299 - atomic_set(&n_rcu_torture_alloc_fail, 0);
75300 - atomic_set(&n_rcu_torture_free, 0);
75301 - atomic_set(&n_rcu_torture_mberror, 0);
75302 - atomic_set(&n_rcu_torture_error, 0);
75303 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75304 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75305 + atomic_set_unchecked(&n_rcu_torture_free, 0);
75306 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75307 + atomic_set_unchecked(&n_rcu_torture_error, 0);
75308 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75309 - atomic_set(&rcu_torture_wcount[i], 0);
75310 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75311 for_each_possible_cpu(cpu) {
75312 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75313 per_cpu(rcu_torture_count, cpu)[i] = 0;
75314 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75315 index 683c4f3..97f54c6 100644
75316 --- a/kernel/rcutree.c
75317 +++ b/kernel/rcutree.c
75318 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75319 /*
75320 * Do softirq processing for the current CPU.
75321 */
75322 -static void rcu_process_callbacks(struct softirq_action *unused)
75323 +static void rcu_process_callbacks(void)
75324 {
75325 /*
75326 * Memory references from any prior RCU read-side critical sections
75327 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75328 index c03edf7..ac1b341 100644
75329 --- a/kernel/rcutree_plugin.h
75330 +++ b/kernel/rcutree_plugin.h
75331 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75332 */
75333 void __rcu_read_lock(void)
75334 {
75335 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75336 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75337 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75338 }
75339 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75340 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75341 struct task_struct *t = current;
75342
75343 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75344 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75345 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75346 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75347 rcu_read_unlock_special(t);
75348 }
75349 diff --git a/kernel/relay.c b/kernel/relay.c
75350 index bf343f5..908e9ee 100644
75351 --- a/kernel/relay.c
75352 +++ b/kernel/relay.c
75353 @@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75354 unsigned int flags,
75355 int *nonpad_ret)
75356 {
75357 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75358 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75359 struct rchan_buf *rbuf = in->private_data;
75360 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75361 uint64_t pos = (uint64_t) *ppos;
75362 @@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75363 .ops = &relay_pipe_buf_ops,
75364 .spd_release = relay_page_release,
75365 };
75366 + ssize_t ret;
75367 +
75368 + pax_track_stack();
75369
75370 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75371 return 0;
75372 diff --git a/kernel/resource.c b/kernel/resource.c
75373 index fb11a58..4e61ae1 100644
75374 --- a/kernel/resource.c
75375 +++ b/kernel/resource.c
75376 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75377
75378 static int __init ioresources_init(void)
75379 {
75380 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75381 +#ifdef CONFIG_GRKERNSEC_PROC_USER
75382 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75383 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75384 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75385 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75386 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75387 +#endif
75388 +#else
75389 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75390 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75391 +#endif
75392 return 0;
75393 }
75394 __initcall(ioresources_init);
75395 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75396 index a56f629..1fc4989 100644
75397 --- a/kernel/rtmutex-tester.c
75398 +++ b/kernel/rtmutex-tester.c
75399 @@ -21,7 +21,7 @@
75400 #define MAX_RT_TEST_MUTEXES 8
75401
75402 static spinlock_t rttest_lock;
75403 -static atomic_t rttest_event;
75404 +static atomic_unchecked_t rttest_event;
75405
75406 struct test_thread_data {
75407 int opcode;
75408 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75409
75410 case RTTEST_LOCKCONT:
75411 td->mutexes[td->opdata] = 1;
75412 - td->event = atomic_add_return(1, &rttest_event);
75413 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75414 return 0;
75415
75416 case RTTEST_RESET:
75417 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75418 return 0;
75419
75420 case RTTEST_RESETEVENT:
75421 - atomic_set(&rttest_event, 0);
75422 + atomic_set_unchecked(&rttest_event, 0);
75423 return 0;
75424
75425 default:
75426 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75427 return ret;
75428
75429 td->mutexes[id] = 1;
75430 - td->event = atomic_add_return(1, &rttest_event);
75431 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75432 rt_mutex_lock(&mutexes[id]);
75433 - td->event = atomic_add_return(1, &rttest_event);
75434 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75435 td->mutexes[id] = 4;
75436 return 0;
75437
75438 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75439 return ret;
75440
75441 td->mutexes[id] = 1;
75442 - td->event = atomic_add_return(1, &rttest_event);
75443 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75444 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
75445 - td->event = atomic_add_return(1, &rttest_event);
75446 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75447 td->mutexes[id] = ret ? 0 : 4;
75448 return ret ? -EINTR : 0;
75449
75450 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75451 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
75452 return ret;
75453
75454 - td->event = atomic_add_return(1, &rttest_event);
75455 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75456 rt_mutex_unlock(&mutexes[id]);
75457 - td->event = atomic_add_return(1, &rttest_event);
75458 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75459 td->mutexes[id] = 0;
75460 return 0;
75461
75462 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75463 break;
75464
75465 td->mutexes[dat] = 2;
75466 - td->event = atomic_add_return(1, &rttest_event);
75467 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75468 break;
75469
75470 case RTTEST_LOCKBKL:
75471 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75472 return;
75473
75474 td->mutexes[dat] = 3;
75475 - td->event = atomic_add_return(1, &rttest_event);
75476 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75477 break;
75478
75479 case RTTEST_LOCKNOWAIT:
75480 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75481 return;
75482
75483 td->mutexes[dat] = 1;
75484 - td->event = atomic_add_return(1, &rttest_event);
75485 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75486 return;
75487
75488 case RTTEST_LOCKBKL:
75489 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
75490 index 29bd4ba..8c5de90 100644
75491 --- a/kernel/rtmutex.c
75492 +++ b/kernel/rtmutex.c
75493 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
75494 */
75495 spin_lock_irqsave(&pendowner->pi_lock, flags);
75496
75497 - WARN_ON(!pendowner->pi_blocked_on);
75498 + BUG_ON(!pendowner->pi_blocked_on);
75499 WARN_ON(pendowner->pi_blocked_on != waiter);
75500 WARN_ON(pendowner->pi_blocked_on->lock != lock);
75501
75502 diff --git a/kernel/sched.c b/kernel/sched.c
75503 index 0591df8..e3af3a4 100644
75504 --- a/kernel/sched.c
75505 +++ b/kernel/sched.c
75506 @@ -5043,7 +5043,7 @@ out:
75507 * In CONFIG_NO_HZ case, the idle load balance owner will do the
75508 * rebalancing for all the cpus for whom scheduler ticks are stopped.
75509 */
75510 -static void run_rebalance_domains(struct softirq_action *h)
75511 +static void run_rebalance_domains(void)
75512 {
75513 int this_cpu = smp_processor_id();
75514 struct rq *this_rq = cpu_rq(this_cpu);
75515 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
75516 }
75517 }
75518
75519 +#ifdef CONFIG_GRKERNSEC_SETXID
75520 +extern void gr_delayed_cred_worker(void);
75521 +static inline void gr_cred_schedule(void)
75522 +{
75523 + if (unlikely(current->delayed_cred))
75524 + gr_delayed_cred_worker();
75525 +}
75526 +#else
75527 +static inline void gr_cred_schedule(void)
75528 +{
75529 +}
75530 +#endif
75531 +
75532 /*
75533 * schedule() is the main scheduler function.
75534 */
75535 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
75536 struct rq *rq;
75537 int cpu;
75538
75539 + pax_track_stack();
75540 +
75541 need_resched:
75542 preempt_disable();
75543 cpu = smp_processor_id();
75544 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
75545
75546 schedule_debug(prev);
75547
75548 + gr_cred_schedule();
75549 +
75550 if (sched_feat(HRTICK))
75551 hrtick_clear(rq);
75552
75553 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
75554 * Look out! "owner" is an entirely speculative pointer
75555 * access and not reliable.
75556 */
75557 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75558 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
75559 {
75560 unsigned int cpu;
75561 struct rq *rq;
75562 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75563 * DEBUG_PAGEALLOC could have unmapped it if
75564 * the mutex owner just released it and exited.
75565 */
75566 - if (probe_kernel_address(&owner->cpu, cpu))
75567 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
75568 return 0;
75569 #else
75570 - cpu = owner->cpu;
75571 + cpu = task_thread_info(owner)->cpu;
75572 #endif
75573
75574 /*
75575 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75576 /*
75577 * Is that owner really running on that cpu?
75578 */
75579 - if (task_thread_info(rq->curr) != owner || need_resched())
75580 + if (rq->curr != owner || need_resched())
75581 return 0;
75582
75583 cpu_relax();
75584 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75585 /* convert nice value [19,-20] to rlimit style value [1,40] */
75586 int nice_rlim = 20 - nice;
75587
75588 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75589 +
75590 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75591 capable(CAP_SYS_NICE));
75592 }
75593 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75594 if (nice > 19)
75595 nice = 19;
75596
75597 - if (increment < 0 && !can_nice(current, nice))
75598 + if (increment < 0 && (!can_nice(current, nice) ||
75599 + gr_handle_chroot_nice()))
75600 return -EPERM;
75601
75602 retval = security_task_setnice(current, nice);
75603 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75604 long power;
75605 int weight;
75606
75607 - WARN_ON(!sd || !sd->groups);
75608 + BUG_ON(!sd || !sd->groups);
75609
75610 if (cpu != group_first_cpu(sd->groups))
75611 return;
75612 diff --git a/kernel/signal.c b/kernel/signal.c
75613 index 2494827..cda80a0 100644
75614 --- a/kernel/signal.c
75615 +++ b/kernel/signal.c
75616 @@ -41,12 +41,12 @@
75617
75618 static struct kmem_cache *sigqueue_cachep;
75619
75620 -static void __user *sig_handler(struct task_struct *t, int sig)
75621 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
75622 {
75623 return t->sighand->action[sig - 1].sa.sa_handler;
75624 }
75625
75626 -static int sig_handler_ignored(void __user *handler, int sig)
75627 +static int sig_handler_ignored(__sighandler_t handler, int sig)
75628 {
75629 /* Is it explicitly or implicitly ignored? */
75630 return handler == SIG_IGN ||
75631 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75632 static int sig_task_ignored(struct task_struct *t, int sig,
75633 int from_ancestor_ns)
75634 {
75635 - void __user *handler;
75636 + __sighandler_t handler;
75637
75638 handler = sig_handler(t, sig);
75639
75640 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75641 */
75642 user = get_uid(__task_cred(t)->user);
75643 atomic_inc(&user->sigpending);
75644 +
75645 + if (!override_rlimit)
75646 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75647 if (override_rlimit ||
75648 atomic_read(&user->sigpending) <=
75649 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75650 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75651
75652 int unhandled_signal(struct task_struct *tsk, int sig)
75653 {
75654 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75655 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75656 if (is_global_init(tsk))
75657 return 1;
75658 if (handler != SIG_IGN && handler != SIG_DFL)
75659 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75660 }
75661 }
75662
75663 + /* allow glibc communication via tgkill to other threads in our
75664 + thread group */
75665 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75666 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75667 + && gr_handle_signal(t, sig))
75668 + return -EPERM;
75669 +
75670 return security_task_kill(t, info, sig, 0);
75671 }
75672
75673 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75674 return send_signal(sig, info, p, 1);
75675 }
75676
75677 -static int
75678 +int
75679 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75680 {
75681 return send_signal(sig, info, t, 0);
75682 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75683 unsigned long int flags;
75684 int ret, blocked, ignored;
75685 struct k_sigaction *action;
75686 + int is_unhandled = 0;
75687
75688 spin_lock_irqsave(&t->sighand->siglock, flags);
75689 action = &t->sighand->action[sig-1];
75690 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75691 }
75692 if (action->sa.sa_handler == SIG_DFL)
75693 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75694 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75695 + is_unhandled = 1;
75696 ret = specific_send_sig_info(sig, info, t);
75697 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75698
75699 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
75700 + normal operation */
75701 + if (is_unhandled) {
75702 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75703 + gr_handle_crash(t, sig);
75704 + }
75705 +
75706 return ret;
75707 }
75708
75709 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75710 {
75711 int ret = check_kill_permission(sig, info, p);
75712
75713 - if (!ret && sig)
75714 + if (!ret && sig) {
75715 ret = do_send_sig_info(sig, info, p, true);
75716 + if (!ret)
75717 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75718 + }
75719
75720 return ret;
75721 }
75722 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75723 {
75724 siginfo_t info;
75725
75726 + pax_track_stack();
75727 +
75728 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75729
75730 memset(&info, 0, sizeof info);
75731 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75732 int error = -ESRCH;
75733
75734 rcu_read_lock();
75735 - p = find_task_by_vpid(pid);
75736 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75737 + /* allow glibc communication via tgkill to other threads in our
75738 + thread group */
75739 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75740 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
75741 + p = find_task_by_vpid_unrestricted(pid);
75742 + else
75743 +#endif
75744 + p = find_task_by_vpid(pid);
75745 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75746 error = check_kill_permission(sig, info, p);
75747 /*
75748 diff --git a/kernel/smp.c b/kernel/smp.c
75749 index aa9cff3..631a0de 100644
75750 --- a/kernel/smp.c
75751 +++ b/kernel/smp.c
75752 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75753 }
75754 EXPORT_SYMBOL(smp_call_function);
75755
75756 -void ipi_call_lock(void)
75757 +void ipi_call_lock(void) __acquires(call_function.lock)
75758 {
75759 spin_lock(&call_function.lock);
75760 }
75761
75762 -void ipi_call_unlock(void)
75763 +void ipi_call_unlock(void) __releases(call_function.lock)
75764 {
75765 spin_unlock(&call_function.lock);
75766 }
75767
75768 -void ipi_call_lock_irq(void)
75769 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
75770 {
75771 spin_lock_irq(&call_function.lock);
75772 }
75773
75774 -void ipi_call_unlock_irq(void)
75775 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
75776 {
75777 spin_unlock_irq(&call_function.lock);
75778 }
75779 diff --git a/kernel/softirq.c b/kernel/softirq.c
75780 index 04a0252..580c512 100644
75781 --- a/kernel/softirq.c
75782 +++ b/kernel/softirq.c
75783 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75784
75785 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75786
75787 -char *softirq_to_name[NR_SOFTIRQS] = {
75788 +const char * const softirq_to_name[NR_SOFTIRQS] = {
75789 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75790 "TASKLET", "SCHED", "HRTIMER", "RCU"
75791 };
75792 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75793
75794 asmlinkage void __do_softirq(void)
75795 {
75796 - struct softirq_action *h;
75797 + const struct softirq_action *h;
75798 __u32 pending;
75799 int max_restart = MAX_SOFTIRQ_RESTART;
75800 int cpu;
75801 @@ -233,7 +233,7 @@ restart:
75802 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75803
75804 trace_softirq_entry(h, softirq_vec);
75805 - h->action(h);
75806 + h->action();
75807 trace_softirq_exit(h, softirq_vec);
75808 if (unlikely(prev_count != preempt_count())) {
75809 printk(KERN_ERR "huh, entered softirq %td %s %p"
75810 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75811 local_irq_restore(flags);
75812 }
75813
75814 -void open_softirq(int nr, void (*action)(struct softirq_action *))
75815 +void open_softirq(int nr, void (*action)(void))
75816 {
75817 - softirq_vec[nr].action = action;
75818 + pax_open_kernel();
75819 + *(void **)&softirq_vec[nr].action = action;
75820 + pax_close_kernel();
75821 }
75822
75823 /*
75824 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75825
75826 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75827
75828 -static void tasklet_action(struct softirq_action *a)
75829 +static void tasklet_action(void)
75830 {
75831 struct tasklet_struct *list;
75832
75833 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75834 }
75835 }
75836
75837 -static void tasklet_hi_action(struct softirq_action *a)
75838 +static void tasklet_hi_action(void)
75839 {
75840 struct tasklet_struct *list;
75841
75842 diff --git a/kernel/sys.c b/kernel/sys.c
75843 index e9512b1..f07185f 100644
75844 --- a/kernel/sys.c
75845 +++ b/kernel/sys.c
75846 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75847 error = -EACCES;
75848 goto out;
75849 }
75850 +
75851 + if (gr_handle_chroot_setpriority(p, niceval)) {
75852 + error = -EACCES;
75853 + goto out;
75854 + }
75855 +
75856 no_nice = security_task_setnice(p, niceval);
75857 if (no_nice) {
75858 error = no_nice;
75859 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75860 !(user = find_user(who)))
75861 goto out_unlock; /* No processes for this user */
75862
75863 - do_each_thread(g, p)
75864 + do_each_thread(g, p) {
75865 if (__task_cred(p)->uid == who)
75866 error = set_one_prio(p, niceval, error);
75867 - while_each_thread(g, p);
75868 + } while_each_thread(g, p);
75869 if (who != cred->uid)
75870 free_uid(user); /* For find_user() */
75871 break;
75872 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75873 !(user = find_user(who)))
75874 goto out_unlock; /* No processes for this user */
75875
75876 - do_each_thread(g, p)
75877 + do_each_thread(g, p) {
75878 if (__task_cred(p)->uid == who) {
75879 niceval = 20 - task_nice(p);
75880 if (niceval > retval)
75881 retval = niceval;
75882 }
75883 - while_each_thread(g, p);
75884 + } while_each_thread(g, p);
75885 if (who != cred->uid)
75886 free_uid(user); /* for find_user() */
75887 break;
75888 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75889 goto error;
75890 }
75891
75892 + if (gr_check_group_change(new->gid, new->egid, -1))
75893 + goto error;
75894 +
75895 if (rgid != (gid_t) -1 ||
75896 (egid != (gid_t) -1 && egid != old->gid))
75897 new->sgid = new->egid;
75898 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75899 goto error;
75900
75901 retval = -EPERM;
75902 +
75903 + if (gr_check_group_change(gid, gid, gid))
75904 + goto error;
75905 +
75906 if (capable(CAP_SETGID))
75907 new->gid = new->egid = new->sgid = new->fsgid = gid;
75908 else if (gid == old->gid || gid == old->sgid)
75909 @@ -559,7 +572,7 @@ error:
75910 /*
75911 * change the user struct in a credentials set to match the new UID
75912 */
75913 -static int set_user(struct cred *new)
75914 +int set_user(struct cred *new)
75915 {
75916 struct user_struct *new_user;
75917
75918 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75919 if (!new_user)
75920 return -EAGAIN;
75921
75922 + /*
75923 + * We don't fail in case of NPROC limit excess here because too many
75924 + * poorly written programs don't check set*uid() return code, assuming
75925 + * it never fails if called by root. We may still enforce NPROC limit
75926 + * for programs doing set*uid()+execve() by harmlessly deferring the
75927 + * failure to the execve() stage.
75928 + */
75929 if (atomic_read(&new_user->processes) >=
75930 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75931 - new_user != INIT_USER) {
75932 - free_uid(new_user);
75933 - return -EAGAIN;
75934 - }
75935 + new_user != INIT_USER)
75936 + current->flags |= PF_NPROC_EXCEEDED;
75937 + else
75938 + current->flags &= ~PF_NPROC_EXCEEDED;
75939
75940 free_uid(new->user);
75941 new->user = new_user;
75942 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75943 goto error;
75944 }
75945
75946 + if (gr_check_user_change(new->uid, new->euid, -1))
75947 + goto error;
75948 +
75949 if (new->uid != old->uid) {
75950 retval = set_user(new);
75951 if (retval < 0)
75952 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75953 goto error;
75954
75955 retval = -EPERM;
75956 +
75957 + if (gr_check_crash_uid(uid))
75958 + goto error;
75959 + if (gr_check_user_change(uid, uid, uid))
75960 + goto error;
75961 +
75962 if (capable(CAP_SETUID)) {
75963 new->suid = new->uid = uid;
75964 if (uid != old->uid) {
75965 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75966 goto error;
75967 }
75968
75969 + if (gr_check_user_change(ruid, euid, -1))
75970 + goto error;
75971 +
75972 if (ruid != (uid_t) -1) {
75973 new->uid = ruid;
75974 if (ruid != old->uid) {
75975 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75976 goto error;
75977 }
75978
75979 + if (gr_check_group_change(rgid, egid, -1))
75980 + goto error;
75981 +
75982 if (rgid != (gid_t) -1)
75983 new->gid = rgid;
75984 if (egid != (gid_t) -1)
75985 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75986 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75987 goto error;
75988
75989 + if (gr_check_user_change(-1, -1, uid))
75990 + goto error;
75991 +
75992 if (uid == old->uid || uid == old->euid ||
75993 uid == old->suid || uid == old->fsuid ||
75994 capable(CAP_SETUID)) {
75995 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75996 if (gid == old->gid || gid == old->egid ||
75997 gid == old->sgid || gid == old->fsgid ||
75998 capable(CAP_SETGID)) {
75999 + if (gr_check_group_change(-1, -1, gid))
76000 + goto error;
76001 +
76002 if (gid != old_fsgid) {
76003 new->fsgid = gid;
76004 goto change_okay;
76005 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
76006 error = get_dumpable(me->mm);
76007 break;
76008 case PR_SET_DUMPABLE:
76009 - if (arg2 < 0 || arg2 > 1) {
76010 + if (arg2 > 1) {
76011 error = -EINVAL;
76012 break;
76013 }
76014 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
76015 index b8bd058..ab6a76be 100644
76016 --- a/kernel/sysctl.c
76017 +++ b/kernel/sysctl.c
76018 @@ -63,6 +63,13 @@
76019 static int deprecated_sysctl_warning(struct __sysctl_args *args);
76020
76021 #if defined(CONFIG_SYSCTL)
76022 +#include <linux/grsecurity.h>
76023 +#include <linux/grinternal.h>
76024 +
76025 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
76026 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
76027 + const int op);
76028 +extern int gr_handle_chroot_sysctl(const int op);
76029
76030 /* External variables not in a header file. */
76031 extern int C_A_D;
76032 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
76033 static int proc_taint(struct ctl_table *table, int write,
76034 void __user *buffer, size_t *lenp, loff_t *ppos);
76035 #endif
76036 +extern ctl_table grsecurity_table[];
76037
76038 static struct ctl_table root_table[];
76039 static struct ctl_table_root sysctl_table_root;
76040 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
76041 int sysctl_legacy_va_layout;
76042 #endif
76043
76044 +#ifdef CONFIG_PAX_SOFTMODE
76045 +static ctl_table pax_table[] = {
76046 + {
76047 + .ctl_name = CTL_UNNUMBERED,
76048 + .procname = "softmode",
76049 + .data = &pax_softmode,
76050 + .maxlen = sizeof(unsigned int),
76051 + .mode = 0600,
76052 + .proc_handler = &proc_dointvec,
76053 + },
76054 +
76055 + { .ctl_name = 0 }
76056 +};
76057 +#endif
76058 +
76059 extern int prove_locking;
76060 extern int lock_stat;
76061
76062 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
76063 #endif
76064
76065 static struct ctl_table kern_table[] = {
76066 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
76067 + {
76068 + .ctl_name = CTL_UNNUMBERED,
76069 + .procname = "grsecurity",
76070 + .mode = 0500,
76071 + .child = grsecurity_table,
76072 + },
76073 +#endif
76074 +
76075 +#ifdef CONFIG_PAX_SOFTMODE
76076 + {
76077 + .ctl_name = CTL_UNNUMBERED,
76078 + .procname = "pax",
76079 + .mode = 0500,
76080 + .child = pax_table,
76081 + },
76082 +#endif
76083 +
76084 {
76085 .ctl_name = CTL_UNNUMBERED,
76086 .procname = "sched_child_runs_first",
76087 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
76088 .data = &modprobe_path,
76089 .maxlen = KMOD_PATH_LEN,
76090 .mode = 0644,
76091 - .proc_handler = &proc_dostring,
76092 - .strategy = &sysctl_string,
76093 + .proc_handler = &proc_dostring_modpriv,
76094 + .strategy = &sysctl_string_modpriv,
76095 },
76096 {
76097 .ctl_name = CTL_UNNUMBERED,
76098 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
76099 .mode = 0644,
76100 .proc_handler = &proc_dointvec
76101 },
76102 + {
76103 + .procname = "heap_stack_gap",
76104 + .data = &sysctl_heap_stack_gap,
76105 + .maxlen = sizeof(sysctl_heap_stack_gap),
76106 + .mode = 0644,
76107 + .proc_handler = proc_doulongvec_minmax,
76108 + },
76109 #else
76110 {
76111 .ctl_name = CTL_UNNUMBERED,
76112 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
76113 return 0;
76114 }
76115
76116 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
76117 +
76118 static int parse_table(int __user *name, int nlen,
76119 void __user *oldval, size_t __user *oldlenp,
76120 void __user *newval, size_t newlen,
76121 @@ -1821,7 +1871,7 @@ repeat:
76122 if (n == table->ctl_name) {
76123 int error;
76124 if (table->child) {
76125 - if (sysctl_perm(root, table, MAY_EXEC))
76126 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
76127 return -EPERM;
76128 name++;
76129 nlen--;
76130 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
76131 int error;
76132 int mode;
76133
76134 + if (table->parent != NULL && table->parent->procname != NULL &&
76135 + table->procname != NULL &&
76136 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
76137 + return -EACCES;
76138 + if (gr_handle_chroot_sysctl(op))
76139 + return -EACCES;
76140 + error = gr_handle_sysctl(table, op);
76141 + if (error)
76142 + return error;
76143 +
76144 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76145 + if (error)
76146 + return error;
76147 +
76148 + if (root->permissions)
76149 + mode = root->permissions(root, current->nsproxy, table);
76150 + else
76151 + mode = table->mode;
76152 +
76153 + return test_perm(mode, op);
76154 +}
76155 +
76156 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
76157 +{
76158 + int error;
76159 + int mode;
76160 +
76161 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76162 if (error)
76163 return error;
76164 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
76165 buffer, lenp, ppos);
76166 }
76167
76168 +int proc_dostring_modpriv(struct ctl_table *table, int write,
76169 + void __user *buffer, size_t *lenp, loff_t *ppos)
76170 +{
76171 + if (write && !capable(CAP_SYS_MODULE))
76172 + return -EPERM;
76173 +
76174 + return _proc_do_string(table->data, table->maxlen, write,
76175 + buffer, lenp, ppos);
76176 +}
76177 +
76178
76179 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
76180 int *valp,
76181 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
76182 vleft = table->maxlen / sizeof(unsigned long);
76183 left = *lenp;
76184
76185 - for (; left && vleft--; i++, min++, max++, first=0) {
76186 + for (; left && vleft--; i++, first=0) {
76187 if (write) {
76188 while (left) {
76189 char c;
76190 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
76191 return -ENOSYS;
76192 }
76193
76194 +int proc_dostring_modpriv(struct ctl_table *table, int write,
76195 + void __user *buffer, size_t *lenp, loff_t *ppos)
76196 +{
76197 + return -ENOSYS;
76198 +}
76199 +
76200 int proc_dointvec(struct ctl_table *table, int write,
76201 void __user *buffer, size_t *lenp, loff_t *ppos)
76202 {
76203 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
76204 return 1;
76205 }
76206
76207 +int sysctl_string_modpriv(struct ctl_table *table,
76208 + void __user *oldval, size_t __user *oldlenp,
76209 + void __user *newval, size_t newlen)
76210 +{
76211 + if (newval && newlen && !capable(CAP_SYS_MODULE))
76212 + return -EPERM;
76213 +
76214 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
76215 +}
76216 +
76217 /*
76218 * This function makes sure that all of the integers in the vector
76219 * are between the minimum and maximum values given in the arrays
76220 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
76221 return -ENOSYS;
76222 }
76223
76224 +int sysctl_string_modpriv(struct ctl_table *table,
76225 + void __user *oldval, size_t __user *oldlenp,
76226 + void __user *newval, size_t newlen)
76227 +{
76228 + return -ENOSYS;
76229 +}
76230 +
76231 int sysctl_intvec(struct ctl_table *table,
76232 void __user *oldval, size_t __user *oldlenp,
76233 void __user *newval, size_t newlen)
76234 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
76235 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
76236 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
76237 EXPORT_SYMBOL(proc_dostring);
76238 +EXPORT_SYMBOL(proc_dostring_modpriv);
76239 EXPORT_SYMBOL(proc_doulongvec_minmax);
76240 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
76241 EXPORT_SYMBOL(register_sysctl_table);
76242 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
76243 EXPORT_SYMBOL(sysctl_jiffies);
76244 EXPORT_SYMBOL(sysctl_ms_jiffies);
76245 EXPORT_SYMBOL(sysctl_string);
76246 +EXPORT_SYMBOL(sysctl_string_modpriv);
76247 EXPORT_SYMBOL(sysctl_data);
76248 EXPORT_SYMBOL(unregister_sysctl_table);
76249 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
76250 index 469193c..ea3ecb2 100644
76251 --- a/kernel/sysctl_check.c
76252 +++ b/kernel/sysctl_check.c
76253 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
76254 } else {
76255 if ((table->strategy == sysctl_data) ||
76256 (table->strategy == sysctl_string) ||
76257 + (table->strategy == sysctl_string_modpriv) ||
76258 (table->strategy == sysctl_intvec) ||
76259 (table->strategy == sysctl_jiffies) ||
76260 (table->strategy == sysctl_ms_jiffies) ||
76261 (table->proc_handler == proc_dostring) ||
76262 + (table->proc_handler == proc_dostring_modpriv) ||
76263 (table->proc_handler == proc_dointvec) ||
76264 (table->proc_handler == proc_dointvec_minmax) ||
76265 (table->proc_handler == proc_dointvec_jiffies) ||
76266 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76267 index a4ef542..798bcd7 100644
76268 --- a/kernel/taskstats.c
76269 +++ b/kernel/taskstats.c
76270 @@ -26,9 +26,12 @@
76271 #include <linux/cgroup.h>
76272 #include <linux/fs.h>
76273 #include <linux/file.h>
76274 +#include <linux/grsecurity.h>
76275 #include <net/genetlink.h>
76276 #include <asm/atomic.h>
76277
76278 +extern int gr_is_taskstats_denied(int pid);
76279 +
76280 /*
76281 * Maximum length of a cpumask that can be specified in
76282 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76283 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76284 size_t size;
76285 cpumask_var_t mask;
76286
76287 + if (gr_is_taskstats_denied(current->pid))
76288 + return -EACCES;
76289 +
76290 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76291 return -ENOMEM;
76292
76293 diff --git a/kernel/time.c b/kernel/time.c
76294 index 33df60e..ca768bd 100644
76295 --- a/kernel/time.c
76296 +++ b/kernel/time.c
76297 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76298 return error;
76299
76300 if (tz) {
76301 + /* we log in do_settimeofday called below, so don't log twice
76302 + */
76303 + if (!tv)
76304 + gr_log_timechange();
76305 +
76306 /* SMP safe, global irq locking makes it work. */
76307 sys_tz = *tz;
76308 update_vsyscall_tz();
76309 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76310 * Avoid unnecessary multiplications/divisions in the
76311 * two most common HZ cases:
76312 */
76313 -unsigned int inline jiffies_to_msecs(const unsigned long j)
76314 +inline unsigned int jiffies_to_msecs(const unsigned long j)
76315 {
76316 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76317 return (MSEC_PER_SEC / HZ) * j;
76318 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76319 }
76320 EXPORT_SYMBOL(jiffies_to_msecs);
76321
76322 -unsigned int inline jiffies_to_usecs(const unsigned long j)
76323 +inline unsigned int jiffies_to_usecs(const unsigned long j)
76324 {
76325 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76326 return (USEC_PER_SEC / HZ) * j;
76327 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76328 index 57b953f..06f149f 100644
76329 --- a/kernel/time/tick-broadcast.c
76330 +++ b/kernel/time/tick-broadcast.c
76331 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76332 * then clear the broadcast bit.
76333 */
76334 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76335 - int cpu = smp_processor_id();
76336 + cpu = smp_processor_id();
76337
76338 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76339 tick_broadcast_clear_oneshot(cpu);
76340 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76341 index 4a71cff..ffb5548 100644
76342 --- a/kernel/time/timekeeping.c
76343 +++ b/kernel/time/timekeeping.c
76344 @@ -14,6 +14,7 @@
76345 #include <linux/init.h>
76346 #include <linux/mm.h>
76347 #include <linux/sched.h>
76348 +#include <linux/grsecurity.h>
76349 #include <linux/sysdev.h>
76350 #include <linux/clocksource.h>
76351 #include <linux/jiffies.h>
76352 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76353 */
76354 struct timespec ts = xtime;
76355 timespec_add_ns(&ts, nsec);
76356 - ACCESS_ONCE(xtime_cache) = ts;
76357 + ACCESS_ONCE_RW(xtime_cache) = ts;
76358 }
76359
76360 /* must hold xtime_lock */
76361 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76362 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76363 return -EINVAL;
76364
76365 + gr_log_timechange();
76366 +
76367 write_seqlock_irqsave(&xtime_lock, flags);
76368
76369 timekeeping_forward_now();
76370 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76371 index 54c0dda..e9095d9 100644
76372 --- a/kernel/time/timer_list.c
76373 +++ b/kernel/time/timer_list.c
76374 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76375
76376 static void print_name_offset(struct seq_file *m, void *sym)
76377 {
76378 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76379 + SEQ_printf(m, "<%p>", NULL);
76380 +#else
76381 char symname[KSYM_NAME_LEN];
76382
76383 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76384 SEQ_printf(m, "<%p>", sym);
76385 else
76386 SEQ_printf(m, "%s", symname);
76387 +#endif
76388 }
76389
76390 static void
76391 @@ -112,7 +116,11 @@ next_one:
76392 static void
76393 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76394 {
76395 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76396 + SEQ_printf(m, " .base: %p\n", NULL);
76397 +#else
76398 SEQ_printf(m, " .base: %p\n", base);
76399 +#endif
76400 SEQ_printf(m, " .index: %d\n",
76401 base->index);
76402 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76403 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76404 {
76405 struct proc_dir_entry *pe;
76406
76407 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76408 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76409 +#else
76410 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76411 +#endif
76412 if (!pe)
76413 return -ENOMEM;
76414 return 0;
76415 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76416 index ee5681f..634089b 100644
76417 --- a/kernel/time/timer_stats.c
76418 +++ b/kernel/time/timer_stats.c
76419 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76420 static unsigned long nr_entries;
76421 static struct entry entries[MAX_ENTRIES];
76422
76423 -static atomic_t overflow_count;
76424 +static atomic_unchecked_t overflow_count;
76425
76426 /*
76427 * The entries are in a hash-table, for fast lookup:
76428 @@ -140,7 +140,7 @@ static void reset_entries(void)
76429 nr_entries = 0;
76430 memset(entries, 0, sizeof(entries));
76431 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
76432 - atomic_set(&overflow_count, 0);
76433 + atomic_set_unchecked(&overflow_count, 0);
76434 }
76435
76436 static struct entry *alloc_entry(void)
76437 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76438 if (likely(entry))
76439 entry->count++;
76440 else
76441 - atomic_inc(&overflow_count);
76442 + atomic_inc_unchecked(&overflow_count);
76443
76444 out_unlock:
76445 spin_unlock_irqrestore(lock, flags);
76446 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76447
76448 static void print_name_offset(struct seq_file *m, unsigned long addr)
76449 {
76450 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76451 + seq_printf(m, "<%p>", NULL);
76452 +#else
76453 char symname[KSYM_NAME_LEN];
76454
76455 if (lookup_symbol_name(addr, symname) < 0)
76456 seq_printf(m, "<%p>", (void *)addr);
76457 else
76458 seq_printf(m, "%s", symname);
76459 +#endif
76460 }
76461
76462 static int tstats_show(struct seq_file *m, void *v)
76463 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
76464
76465 seq_puts(m, "Timer Stats Version: v0.2\n");
76466 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
76467 - if (atomic_read(&overflow_count))
76468 + if (atomic_read_unchecked(&overflow_count))
76469 seq_printf(m, "Overflow: %d entries\n",
76470 - atomic_read(&overflow_count));
76471 + atomic_read_unchecked(&overflow_count));
76472
76473 for (i = 0; i < nr_entries; i++) {
76474 entry = entries + i;
76475 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
76476 {
76477 struct proc_dir_entry *pe;
76478
76479 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76480 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
76481 +#else
76482 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
76483 +#endif
76484 if (!pe)
76485 return -ENOMEM;
76486 return 0;
76487 diff --git a/kernel/timer.c b/kernel/timer.c
76488 index cb3c1f1..8bf5526 100644
76489 --- a/kernel/timer.c
76490 +++ b/kernel/timer.c
76491 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
76492 /*
76493 * This function runs timers and the timer-tq in bottom half context.
76494 */
76495 -static void run_timer_softirq(struct softirq_action *h)
76496 +static void run_timer_softirq(void)
76497 {
76498 struct tvec_base *base = __get_cpu_var(tvec_bases);
76499
76500 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
76501 index d9d6206..f19467e 100644
76502 --- a/kernel/trace/blktrace.c
76503 +++ b/kernel/trace/blktrace.c
76504 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
76505 struct blk_trace *bt = filp->private_data;
76506 char buf[16];
76507
76508 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
76509 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
76510
76511 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
76512 }
76513 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
76514 return 1;
76515
76516 bt = buf->chan->private_data;
76517 - atomic_inc(&bt->dropped);
76518 + atomic_inc_unchecked(&bt->dropped);
76519 return 0;
76520 }
76521
76522 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
76523
76524 bt->dir = dir;
76525 bt->dev = dev;
76526 - atomic_set(&bt->dropped, 0);
76527 + atomic_set_unchecked(&bt->dropped, 0);
76528
76529 ret = -EIO;
76530 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
76531 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
76532 index 4872937..c794d40 100644
76533 --- a/kernel/trace/ftrace.c
76534 +++ b/kernel/trace/ftrace.c
76535 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
76536
76537 ip = rec->ip;
76538
76539 + ret = ftrace_arch_code_modify_prepare();
76540 + FTRACE_WARN_ON(ret);
76541 + if (ret)
76542 + return 0;
76543 +
76544 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
76545 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
76546 if (ret) {
76547 ftrace_bug(ret, ip);
76548 rec->flags |= FTRACE_FL_FAILED;
76549 - return 0;
76550 }
76551 - return 1;
76552 + return ret ? 0 : 1;
76553 }
76554
76555 /*
76556 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
76557 index e749a05..19c6e94 100644
76558 --- a/kernel/trace/ring_buffer.c
76559 +++ b/kernel/trace/ring_buffer.c
76560 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
76561 * the reader page). But if the next page is a header page,
76562 * its flags will be non zero.
76563 */
76564 -static int inline
76565 +static inline int
76566 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
76567 struct buffer_page *page, struct list_head *list)
76568 {
76569 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
76570 index a2a2d1f..7f32b09 100644
76571 --- a/kernel/trace/trace.c
76572 +++ b/kernel/trace/trace.c
76573 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76574 size_t rem;
76575 unsigned int i;
76576
76577 + pax_track_stack();
76578 +
76579 /* copy the tracer to avoid using a global lock all around */
76580 mutex_lock(&trace_types_lock);
76581 if (unlikely(old_tracer != current_trace && current_trace)) {
76582 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76583 int entries, size, i;
76584 size_t ret;
76585
76586 + pax_track_stack();
76587 +
76588 if (*ppos & (PAGE_SIZE - 1)) {
76589 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76590 return -EINVAL;
76591 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76592 };
76593 #endif
76594
76595 -static struct dentry *d_tracer;
76596 -
76597 struct dentry *tracing_init_dentry(void)
76598 {
76599 + static struct dentry *d_tracer;
76600 static int once;
76601
76602 if (d_tracer)
76603 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76604 return d_tracer;
76605 }
76606
76607 -static struct dentry *d_percpu;
76608 -
76609 struct dentry *tracing_dentry_percpu(void)
76610 {
76611 + static struct dentry *d_percpu;
76612 static int once;
76613 struct dentry *d_tracer;
76614
76615 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76616 index d128f65..f37b4af 100644
76617 --- a/kernel/trace/trace_events.c
76618 +++ b/kernel/trace/trace_events.c
76619 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76620 * Modules must own their file_operations to keep up with
76621 * reference counting.
76622 */
76623 +
76624 struct ftrace_module_file_ops {
76625 struct list_head list;
76626 struct module *mod;
76627 - struct file_operations id;
76628 - struct file_operations enable;
76629 - struct file_operations format;
76630 - struct file_operations filter;
76631 };
76632
76633 static void remove_subsystem_dir(const char *name)
76634 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76635
76636 file_ops->mod = mod;
76637
76638 - file_ops->id = ftrace_event_id_fops;
76639 - file_ops->id.owner = mod;
76640 -
76641 - file_ops->enable = ftrace_enable_fops;
76642 - file_ops->enable.owner = mod;
76643 -
76644 - file_ops->filter = ftrace_event_filter_fops;
76645 - file_ops->filter.owner = mod;
76646 -
76647 - file_ops->format = ftrace_event_format_fops;
76648 - file_ops->format.owner = mod;
76649 + pax_open_kernel();
76650 + *(void **)&mod->trace_id.owner = mod;
76651 + *(void **)&mod->trace_enable.owner = mod;
76652 + *(void **)&mod->trace_filter.owner = mod;
76653 + *(void **)&mod->trace_format.owner = mod;
76654 + pax_close_kernel();
76655
76656 list_add(&file_ops->list, &ftrace_module_file_list);
76657
76658 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76659 call->mod = mod;
76660 list_add(&call->list, &ftrace_events);
76661 event_create_dir(call, d_events,
76662 - &file_ops->id, &file_ops->enable,
76663 - &file_ops->filter, &file_ops->format);
76664 + &mod->trace_id, &mod->trace_enable,
76665 + &mod->trace_filter, &mod->trace_format);
76666 }
76667 }
76668
76669 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76670 index 0acd834..b800b56 100644
76671 --- a/kernel/trace/trace_mmiotrace.c
76672 +++ b/kernel/trace/trace_mmiotrace.c
76673 @@ -23,7 +23,7 @@ struct header_iter {
76674 static struct trace_array *mmio_trace_array;
76675 static bool overrun_detected;
76676 static unsigned long prev_overruns;
76677 -static atomic_t dropped_count;
76678 +static atomic_unchecked_t dropped_count;
76679
76680 static void mmio_reset_data(struct trace_array *tr)
76681 {
76682 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76683
76684 static unsigned long count_overruns(struct trace_iterator *iter)
76685 {
76686 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
76687 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76688 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76689
76690 if (over > prev_overruns)
76691 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76692 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76693 sizeof(*entry), 0, pc);
76694 if (!event) {
76695 - atomic_inc(&dropped_count);
76696 + atomic_inc_unchecked(&dropped_count);
76697 return;
76698 }
76699 entry = ring_buffer_event_data(event);
76700 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76701 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76702 sizeof(*entry), 0, pc);
76703 if (!event) {
76704 - atomic_inc(&dropped_count);
76705 + atomic_inc_unchecked(&dropped_count);
76706 return;
76707 }
76708 entry = ring_buffer_event_data(event);
76709 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76710 index b6c12c6..41fdc53 100644
76711 --- a/kernel/trace/trace_output.c
76712 +++ b/kernel/trace/trace_output.c
76713 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76714 return 0;
76715 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76716 if (!IS_ERR(p)) {
76717 - p = mangle_path(s->buffer + s->len, p, "\n");
76718 + p = mangle_path(s->buffer + s->len, p, "\n\\");
76719 if (p) {
76720 s->len = p - s->buffer;
76721 return 1;
76722 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76723 index 8504ac7..ecf0adb 100644
76724 --- a/kernel/trace/trace_stack.c
76725 +++ b/kernel/trace/trace_stack.c
76726 @@ -50,7 +50,7 @@ static inline void check_stack(void)
76727 return;
76728
76729 /* we do not handle interrupt stacks yet */
76730 - if (!object_is_on_stack(&this_size))
76731 + if (!object_starts_on_stack(&this_size))
76732 return;
76733
76734 local_irq_save(flags);
76735 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76736 index 40cafb0..d5ead43 100644
76737 --- a/kernel/trace/trace_workqueue.c
76738 +++ b/kernel/trace/trace_workqueue.c
76739 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76740 int cpu;
76741 pid_t pid;
76742 /* Can be inserted from interrupt or user context, need to be atomic */
76743 - atomic_t inserted;
76744 + atomic_unchecked_t inserted;
76745 /*
76746 * Don't need to be atomic, works are serialized in a single workqueue thread
76747 * on a single CPU.
76748 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76749 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76750 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76751 if (node->pid == wq_thread->pid) {
76752 - atomic_inc(&node->inserted);
76753 + atomic_inc_unchecked(&node->inserted);
76754 goto found;
76755 }
76756 }
76757 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76758 tsk = get_pid_task(pid, PIDTYPE_PID);
76759 if (tsk) {
76760 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76761 - atomic_read(&cws->inserted), cws->executed,
76762 + atomic_read_unchecked(&cws->inserted), cws->executed,
76763 tsk->comm);
76764 put_task_struct(tsk);
76765 }
76766 diff --git a/kernel/user.c b/kernel/user.c
76767 index 1b91701..8795237 100644
76768 --- a/kernel/user.c
76769 +++ b/kernel/user.c
76770 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76771 spin_lock_irq(&uidhash_lock);
76772 up = uid_hash_find(uid, hashent);
76773 if (up) {
76774 + put_user_ns(ns);
76775 key_put(new->uid_keyring);
76776 key_put(new->session_keyring);
76777 kmem_cache_free(uid_cachep, new);
76778 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76779 index 234ceb1..ad74049 100644
76780 --- a/lib/Kconfig.debug
76781 +++ b/lib/Kconfig.debug
76782 @@ -905,7 +905,7 @@ config LATENCYTOP
76783 select STACKTRACE
76784 select SCHEDSTATS
76785 select SCHED_DEBUG
76786 - depends on HAVE_LATENCYTOP_SUPPORT
76787 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76788 help
76789 Enable this option if you want to use the LatencyTOP tool
76790 to find out which userspace is blocking on what kernel operations.
76791 diff --git a/lib/bitmap.c b/lib/bitmap.c
76792 index 7025658..8d14cab 100644
76793 --- a/lib/bitmap.c
76794 +++ b/lib/bitmap.c
76795 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76796 {
76797 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76798 u32 chunk;
76799 - const char __user *ubuf = buf;
76800 + const char __user *ubuf = (const char __force_user *)buf;
76801
76802 bitmap_zero(maskp, nmaskbits);
76803
76804 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76805 {
76806 if (!access_ok(VERIFY_READ, ubuf, ulen))
76807 return -EFAULT;
76808 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76809 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76810 }
76811 EXPORT_SYMBOL(bitmap_parse_user);
76812
76813 diff --git a/lib/bug.c b/lib/bug.c
76814 index 300e41a..2779eb0 100644
76815 --- a/lib/bug.c
76816 +++ b/lib/bug.c
76817 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76818 return BUG_TRAP_TYPE_NONE;
76819
76820 bug = find_bug(bugaddr);
76821 + if (!bug)
76822 + return BUG_TRAP_TYPE_NONE;
76823
76824 printk(KERN_EMERG "------------[ cut here ]------------\n");
76825
76826 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76827 index 2b413db..e21d207 100644
76828 --- a/lib/debugobjects.c
76829 +++ b/lib/debugobjects.c
76830 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76831 if (limit > 4)
76832 return;
76833
76834 - is_on_stack = object_is_on_stack(addr);
76835 + is_on_stack = object_starts_on_stack(addr);
76836 if (is_on_stack == onstack)
76837 return;
76838
76839 diff --git a/lib/devres.c b/lib/devres.c
76840 index 72c8909..7543868 100644
76841 --- a/lib/devres.c
76842 +++ b/lib/devres.c
76843 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76844 {
76845 iounmap(addr);
76846 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76847 - (void *)addr));
76848 + (void __force *)addr));
76849 }
76850 EXPORT_SYMBOL(devm_iounmap);
76851
76852 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76853 {
76854 ioport_unmap(addr);
76855 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76856 - devm_ioport_map_match, (void *)addr));
76857 + devm_ioport_map_match, (void __force *)addr));
76858 }
76859 EXPORT_SYMBOL(devm_ioport_unmap);
76860
76861 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76862 index 084e879..0674448 100644
76863 --- a/lib/dma-debug.c
76864 +++ b/lib/dma-debug.c
76865 @@ -861,7 +861,7 @@ out:
76866
76867 static void check_for_stack(struct device *dev, void *addr)
76868 {
76869 - if (object_is_on_stack(addr))
76870 + if (object_starts_on_stack(addr))
76871 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76872 "stack [addr=%p]\n", addr);
76873 }
76874 diff --git a/lib/idr.c b/lib/idr.c
76875 index eda7ba3..915dfae 100644
76876 --- a/lib/idr.c
76877 +++ b/lib/idr.c
76878 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76879 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76880
76881 /* if already at the top layer, we need to grow */
76882 - if (id >= 1 << (idp->layers * IDR_BITS)) {
76883 + if (id >= (1 << (idp->layers * IDR_BITS))) {
76884 *starting_id = id;
76885 return IDR_NEED_TO_GROW;
76886 }
76887 diff --git a/lib/inflate.c b/lib/inflate.c
76888 index d102559..4215f31 100644
76889 --- a/lib/inflate.c
76890 +++ b/lib/inflate.c
76891 @@ -266,7 +266,7 @@ static void free(void *where)
76892 malloc_ptr = free_mem_ptr;
76893 }
76894 #else
76895 -#define malloc(a) kmalloc(a, GFP_KERNEL)
76896 +#define malloc(a) kmalloc((a), GFP_KERNEL)
76897 #define free(a) kfree(a)
76898 #endif
76899
76900 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76901 index bd2bea9..6b3c95e 100644
76902 --- a/lib/is_single_threaded.c
76903 +++ b/lib/is_single_threaded.c
76904 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76905 struct task_struct *p, *t;
76906 bool ret;
76907
76908 + if (!mm)
76909 + return true;
76910 +
76911 if (atomic_read(&task->signal->live) != 1)
76912 return false;
76913
76914 diff --git a/lib/kobject.c b/lib/kobject.c
76915 index b512b74..8115eb1 100644
76916 --- a/lib/kobject.c
76917 +++ b/lib/kobject.c
76918 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76919 return ret;
76920 }
76921
76922 -struct sysfs_ops kobj_sysfs_ops = {
76923 +const struct sysfs_ops kobj_sysfs_ops = {
76924 .show = kobj_attr_show,
76925 .store = kobj_attr_store,
76926 };
76927 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76928 * If the kset was not able to be created, NULL will be returned.
76929 */
76930 static struct kset *kset_create(const char *name,
76931 - struct kset_uevent_ops *uevent_ops,
76932 + const struct kset_uevent_ops *uevent_ops,
76933 struct kobject *parent_kobj)
76934 {
76935 struct kset *kset;
76936 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76937 * If the kset was not able to be created, NULL will be returned.
76938 */
76939 struct kset *kset_create_and_add(const char *name,
76940 - struct kset_uevent_ops *uevent_ops,
76941 + const struct kset_uevent_ops *uevent_ops,
76942 struct kobject *parent_kobj)
76943 {
76944 struct kset *kset;
76945 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76946 index 507b821..0bf8ed0 100644
76947 --- a/lib/kobject_uevent.c
76948 +++ b/lib/kobject_uevent.c
76949 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76950 const char *subsystem;
76951 struct kobject *top_kobj;
76952 struct kset *kset;
76953 - struct kset_uevent_ops *uevent_ops;
76954 + const struct kset_uevent_ops *uevent_ops;
76955 u64 seq;
76956 int i = 0;
76957 int retval = 0;
76958 diff --git a/lib/kref.c b/lib/kref.c
76959 index 9ecd6e8..12c94c1 100644
76960 --- a/lib/kref.c
76961 +++ b/lib/kref.c
76962 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76963 */
76964 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76965 {
76966 - WARN_ON(release == NULL);
76967 + BUG_ON(release == NULL);
76968 WARN_ON(release == (void (*)(struct kref *))kfree);
76969
76970 if (atomic_dec_and_test(&kref->refcount)) {
76971 diff --git a/lib/parser.c b/lib/parser.c
76972 index b00d020..1b34325 100644
76973 --- a/lib/parser.c
76974 +++ b/lib/parser.c
76975 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76976 char *buf;
76977 int ret;
76978
76979 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76980 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76981 if (!buf)
76982 return -ENOMEM;
76983 memcpy(buf, s->from, s->to - s->from);
76984 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76985 index 92cdd99..a8149d7 100644
76986 --- a/lib/radix-tree.c
76987 +++ b/lib/radix-tree.c
76988 @@ -81,7 +81,7 @@ struct radix_tree_preload {
76989 int nr;
76990 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76991 };
76992 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76993 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76994
76995 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76996 {
76997 diff --git a/lib/random32.c b/lib/random32.c
76998 index 217d5c4..45aba8a 100644
76999 --- a/lib/random32.c
77000 +++ b/lib/random32.c
77001 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
77002 */
77003 static inline u32 __seed(u32 x, u32 m)
77004 {
77005 - return (x < m) ? x + m : x;
77006 + return (x <= m) ? x + m + 1 : x;
77007 }
77008
77009 /**
77010 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
77011 index 33bed5e..1477e46 100644
77012 --- a/lib/vsprintf.c
77013 +++ b/lib/vsprintf.c
77014 @@ -16,6 +16,9 @@
77015 * - scnprintf and vscnprintf
77016 */
77017
77018 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77019 +#define __INCLUDED_BY_HIDESYM 1
77020 +#endif
77021 #include <stdarg.h>
77022 #include <linux/module.h>
77023 #include <linux/types.h>
77024 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
77025 return buf;
77026 }
77027
77028 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
77029 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
77030 {
77031 int len, i;
77032
77033 if ((unsigned long)s < PAGE_SIZE)
77034 - s = "<NULL>";
77035 + s = "(null)";
77036
77037 len = strnlen(s, spec.precision);
77038
77039 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
77040 unsigned long value = (unsigned long) ptr;
77041 #ifdef CONFIG_KALLSYMS
77042 char sym[KSYM_SYMBOL_LEN];
77043 - if (ext != 'f' && ext != 's')
77044 + if (ext != 'f' && ext != 's' && ext != 'a')
77045 sprint_symbol(sym, value);
77046 else
77047 kallsyms_lookup(value, NULL, NULL, NULL, sym);
77048 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
77049 * - 'f' For simple symbolic function names without offset
77050 * - 'S' For symbolic direct pointers with offset
77051 * - 's' For symbolic direct pointers without offset
77052 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
77053 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
77054 * - 'R' For a struct resource pointer, it prints the range of
77055 * addresses (not the name nor the flags)
77056 * - 'M' For a 6-byte MAC address, it prints the address in the
77057 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77058 struct printf_spec spec)
77059 {
77060 if (!ptr)
77061 - return string(buf, end, "(null)", spec);
77062 + return string(buf, end, "(nil)", spec);
77063
77064 switch (*fmt) {
77065 case 'F':
77066 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77067 case 's':
77068 /* Fallthrough */
77069 case 'S':
77070 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77071 + break;
77072 +#else
77073 + return symbol_string(buf, end, ptr, spec, *fmt);
77074 +#endif
77075 + case 'a':
77076 + /* Fallthrough */
77077 + case 'A':
77078 return symbol_string(buf, end, ptr, spec, *fmt);
77079 case 'R':
77080 return resource_string(buf, end, ptr, spec);
77081 @@ -1445,7 +1458,7 @@ do { \
77082 size_t len;
77083 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
77084 || (unsigned long)save_str < PAGE_SIZE)
77085 - save_str = "<NULL>";
77086 + save_str = "(null)";
77087 len = strlen(save_str);
77088 if (str + len + 1 < end)
77089 memcpy(str, save_str, len + 1);
77090 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77091 typeof(type) value; \
77092 if (sizeof(type) == 8) { \
77093 args = PTR_ALIGN(args, sizeof(u32)); \
77094 - *(u32 *)&value = *(u32 *)args; \
77095 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
77096 + *(u32 *)&value = *(const u32 *)args; \
77097 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
77098 } else { \
77099 args = PTR_ALIGN(args, sizeof(type)); \
77100 - value = *(typeof(type) *)args; \
77101 + value = *(const typeof(type) *)args; \
77102 } \
77103 args += sizeof(type); \
77104 value; \
77105 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77106 const char *str_arg = args;
77107 size_t len = strlen(str_arg);
77108 args += len + 1;
77109 - str = string(str, end, (char *)str_arg, spec);
77110 + str = string(str, end, str_arg, spec);
77111 break;
77112 }
77113
77114 diff --git a/localversion-grsec b/localversion-grsec
77115 new file mode 100644
77116 index 0000000..7cd6065
77117 --- /dev/null
77118 +++ b/localversion-grsec
77119 @@ -0,0 +1 @@
77120 +-grsec
77121 diff --git a/mm/Kconfig b/mm/Kconfig
77122 index 2c19c0b..f3c3f83 100644
77123 --- a/mm/Kconfig
77124 +++ b/mm/Kconfig
77125 @@ -228,7 +228,7 @@ config KSM
77126 config DEFAULT_MMAP_MIN_ADDR
77127 int "Low address space to protect from user allocation"
77128 depends on MMU
77129 - default 4096
77130 + default 65536
77131 help
77132 This is the portion of low virtual memory which should be protected
77133 from userspace allocation. Keeping a user from writing to low pages
77134 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
77135 index 67a33a5..094dcf1 100644
77136 --- a/mm/backing-dev.c
77137 +++ b/mm/backing-dev.c
77138 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
77139 list_add_tail_rcu(&wb->list, &bdi->wb_list);
77140 spin_unlock(&bdi->wb_lock);
77141
77142 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
77143 + tsk->flags |= PF_SWAPWRITE;
77144 set_freezable();
77145
77146 /*
77147 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
77148 * Add the default flusher task that gets created for any bdi
77149 * that has dirty data pending writeout
77150 */
77151 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77152 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77153 {
77154 if (!bdi_cap_writeback_dirty(bdi))
77155 return;
77156 diff --git a/mm/filemap.c b/mm/filemap.c
77157 index a1fe378..e26702f 100644
77158 --- a/mm/filemap.c
77159 +++ b/mm/filemap.c
77160 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
77161 struct address_space *mapping = file->f_mapping;
77162
77163 if (!mapping->a_ops->readpage)
77164 - return -ENOEXEC;
77165 + return -ENODEV;
77166 file_accessed(file);
77167 vma->vm_ops = &generic_file_vm_ops;
77168 vma->vm_flags |= VM_CAN_NONLINEAR;
77169 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
77170 *pos = i_size_read(inode);
77171
77172 if (limit != RLIM_INFINITY) {
77173 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
77174 if (*pos >= limit) {
77175 send_sig(SIGXFSZ, current, 0);
77176 return -EFBIG;
77177 diff --git a/mm/fremap.c b/mm/fremap.c
77178 index b6ec85a..a24ac22 100644
77179 --- a/mm/fremap.c
77180 +++ b/mm/fremap.c
77181 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77182 retry:
77183 vma = find_vma(mm, start);
77184
77185 +#ifdef CONFIG_PAX_SEGMEXEC
77186 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
77187 + goto out;
77188 +#endif
77189 +
77190 /*
77191 * Make sure the vma is shared, that it supports prefaulting,
77192 * and that the remapped range is valid and fully within
77193 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77194 /*
77195 * drop PG_Mlocked flag for over-mapped range
77196 */
77197 - unsigned int saved_flags = vma->vm_flags;
77198 + unsigned long saved_flags = vma->vm_flags;
77199 munlock_vma_pages_range(vma, start, start + size);
77200 vma->vm_flags = saved_flags;
77201 }
77202 diff --git a/mm/highmem.c b/mm/highmem.c
77203 index 9c1e627..5ca9447 100644
77204 --- a/mm/highmem.c
77205 +++ b/mm/highmem.c
77206 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
77207 * So no dangers, even with speculative execution.
77208 */
77209 page = pte_page(pkmap_page_table[i]);
77210 + pax_open_kernel();
77211 pte_clear(&init_mm, (unsigned long)page_address(page),
77212 &pkmap_page_table[i]);
77213 -
77214 + pax_close_kernel();
77215 set_page_address(page, NULL);
77216 need_flush = 1;
77217 }
77218 @@ -177,9 +178,11 @@ start:
77219 }
77220 }
77221 vaddr = PKMAP_ADDR(last_pkmap_nr);
77222 +
77223 + pax_open_kernel();
77224 set_pte_at(&init_mm, vaddr,
77225 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
77226 -
77227 + pax_close_kernel();
77228 pkmap_count[last_pkmap_nr] = 1;
77229 set_page_address(page, (void *)vaddr);
77230
77231 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
77232 index 5e1e508..ac70275 100644
77233 --- a/mm/hugetlb.c
77234 +++ b/mm/hugetlb.c
77235 @@ -869,6 +869,7 @@ free:
77236 list_del(&page->lru);
77237 enqueue_huge_page(h, page);
77238 }
77239 + spin_unlock(&hugetlb_lock);
77240
77241 /* Free unnecessary surplus pages to the buddy allocator */
77242 if (!list_empty(&surplus_list)) {
77243 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
77244 return 1;
77245 }
77246
77247 +#ifdef CONFIG_PAX_SEGMEXEC
77248 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
77249 +{
77250 + struct mm_struct *mm = vma->vm_mm;
77251 + struct vm_area_struct *vma_m;
77252 + unsigned long address_m;
77253 + pte_t *ptep_m;
77254 +
77255 + vma_m = pax_find_mirror_vma(vma);
77256 + if (!vma_m)
77257 + return;
77258 +
77259 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77260 + address_m = address + SEGMEXEC_TASK_SIZE;
77261 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
77262 + get_page(page_m);
77263 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77264 +}
77265 +#endif
77266 +
77267 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77268 unsigned long address, pte_t *ptep, pte_t pte,
77269 struct page *pagecache_page)
77270 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
77271 huge_ptep_clear_flush(vma, address, ptep);
77272 set_huge_pte_at(mm, address, ptep,
77273 make_huge_pte(vma, new_page, 1));
77274 +
77275 +#ifdef CONFIG_PAX_SEGMEXEC
77276 + pax_mirror_huge_pte(vma, address, new_page);
77277 +#endif
77278 +
77279 /* Make the old page be freed below */
77280 new_page = old_page;
77281 }
77282 @@ -2135,6 +2161,10 @@ retry:
77283 && (vma->vm_flags & VM_SHARED)));
77284 set_huge_pte_at(mm, address, ptep, new_pte);
77285
77286 +#ifdef CONFIG_PAX_SEGMEXEC
77287 + pax_mirror_huge_pte(vma, address, page);
77288 +#endif
77289 +
77290 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77291 /* Optimization, do the COW without a second fault */
77292 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77293 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77294 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77295 struct hstate *h = hstate_vma(vma);
77296
77297 +#ifdef CONFIG_PAX_SEGMEXEC
77298 + struct vm_area_struct *vma_m;
77299 +
77300 + vma_m = pax_find_mirror_vma(vma);
77301 + if (vma_m) {
77302 + unsigned long address_m;
77303 +
77304 + if (vma->vm_start > vma_m->vm_start) {
77305 + address_m = address;
77306 + address -= SEGMEXEC_TASK_SIZE;
77307 + vma = vma_m;
77308 + h = hstate_vma(vma);
77309 + } else
77310 + address_m = address + SEGMEXEC_TASK_SIZE;
77311 +
77312 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77313 + return VM_FAULT_OOM;
77314 + address_m &= HPAGE_MASK;
77315 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77316 + }
77317 +#endif
77318 +
77319 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77320 if (!ptep)
77321 return VM_FAULT_OOM;
77322 diff --git a/mm/internal.h b/mm/internal.h
77323 index f03e8e2..7354343 100644
77324 --- a/mm/internal.h
77325 +++ b/mm/internal.h
77326 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77327 * in mm/page_alloc.c
77328 */
77329 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77330 +extern void free_compound_page(struct page *page);
77331 extern void prep_compound_page(struct page *page, unsigned long order);
77332
77333
77334 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77335 index c346660..b47382f 100644
77336 --- a/mm/kmemleak.c
77337 +++ b/mm/kmemleak.c
77338 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77339
77340 for (i = 0; i < object->trace_len; i++) {
77341 void *ptr = (void *)object->trace[i];
77342 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77343 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77344 }
77345 }
77346
77347 diff --git a/mm/maccess.c b/mm/maccess.c
77348 index 9073695..1127f348 100644
77349 --- a/mm/maccess.c
77350 +++ b/mm/maccess.c
77351 @@ -14,7 +14,7 @@
77352 * Safely read from address @src to the buffer at @dst. If a kernel fault
77353 * happens, handle that and return -EFAULT.
77354 */
77355 -long probe_kernel_read(void *dst, void *src, size_t size)
77356 +long probe_kernel_read(void *dst, const void *src, size_t size)
77357 {
77358 long ret;
77359 mm_segment_t old_fs = get_fs();
77360 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77361 set_fs(KERNEL_DS);
77362 pagefault_disable();
77363 ret = __copy_from_user_inatomic(dst,
77364 - (__force const void __user *)src, size);
77365 + (const void __force_user *)src, size);
77366 pagefault_enable();
77367 set_fs(old_fs);
77368
77369 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77370 * Safely write to address @dst from the buffer at @src. If a kernel fault
77371 * happens, handle that and return -EFAULT.
77372 */
77373 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77374 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77375 {
77376 long ret;
77377 mm_segment_t old_fs = get_fs();
77378
77379 set_fs(KERNEL_DS);
77380 pagefault_disable();
77381 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77382 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77383 pagefault_enable();
77384 set_fs(old_fs);
77385
77386 diff --git a/mm/madvise.c b/mm/madvise.c
77387 index 35b1479..499f7d4 100644
77388 --- a/mm/madvise.c
77389 +++ b/mm/madvise.c
77390 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77391 pgoff_t pgoff;
77392 unsigned long new_flags = vma->vm_flags;
77393
77394 +#ifdef CONFIG_PAX_SEGMEXEC
77395 + struct vm_area_struct *vma_m;
77396 +#endif
77397 +
77398 switch (behavior) {
77399 case MADV_NORMAL:
77400 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77401 @@ -103,6 +107,13 @@ success:
77402 /*
77403 * vm_flags is protected by the mmap_sem held in write mode.
77404 */
77405 +
77406 +#ifdef CONFIG_PAX_SEGMEXEC
77407 + vma_m = pax_find_mirror_vma(vma);
77408 + if (vma_m)
77409 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77410 +#endif
77411 +
77412 vma->vm_flags = new_flags;
77413
77414 out:
77415 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77416 struct vm_area_struct ** prev,
77417 unsigned long start, unsigned long end)
77418 {
77419 +
77420 +#ifdef CONFIG_PAX_SEGMEXEC
77421 + struct vm_area_struct *vma_m;
77422 +#endif
77423 +
77424 *prev = vma;
77425 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77426 return -EINVAL;
77427 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77428 zap_page_range(vma, start, end - start, &details);
77429 } else
77430 zap_page_range(vma, start, end - start, NULL);
77431 +
77432 +#ifdef CONFIG_PAX_SEGMEXEC
77433 + vma_m = pax_find_mirror_vma(vma);
77434 + if (vma_m) {
77435 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
77436 + struct zap_details details = {
77437 + .nonlinear_vma = vma_m,
77438 + .last_index = ULONG_MAX,
77439 + };
77440 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
77441 + } else
77442 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
77443 + }
77444 +#endif
77445 +
77446 return 0;
77447 }
77448
77449 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
77450 if (end < start)
77451 goto out;
77452
77453 +#ifdef CONFIG_PAX_SEGMEXEC
77454 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77455 + if (end > SEGMEXEC_TASK_SIZE)
77456 + goto out;
77457 + } else
77458 +#endif
77459 +
77460 + if (end > TASK_SIZE)
77461 + goto out;
77462 +
77463 error = 0;
77464 if (end == start)
77465 goto out;
77466 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
77467 index 8aeba53..b4a4198 100644
77468 --- a/mm/memory-failure.c
77469 +++ b/mm/memory-failure.c
77470 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
77471
77472 int sysctl_memory_failure_recovery __read_mostly = 1;
77473
77474 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77475 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77476
77477 /*
77478 * Send all the processes who have the page mapped an ``action optional''
77479 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
77480 si.si_signo = SIGBUS;
77481 si.si_errno = 0;
77482 si.si_code = BUS_MCEERR_AO;
77483 - si.si_addr = (void *)addr;
77484 + si.si_addr = (void __user *)addr;
77485 #ifdef __ARCH_SI_TRAPNO
77486 si.si_trapno = trapno;
77487 #endif
77488 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
77489 return 0;
77490 }
77491
77492 - atomic_long_add(1, &mce_bad_pages);
77493 + atomic_long_add_unchecked(1, &mce_bad_pages);
77494
77495 /*
77496 * We need/can do nothing about count=0 pages.
77497 diff --git a/mm/memory.c b/mm/memory.c
77498 index 6c836d3..48f3264 100644
77499 --- a/mm/memory.c
77500 +++ b/mm/memory.c
77501 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
77502 return;
77503
77504 pmd = pmd_offset(pud, start);
77505 +
77506 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
77507 pud_clear(pud);
77508 pmd_free_tlb(tlb, pmd, start);
77509 +#endif
77510 +
77511 }
77512
77513 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77514 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77515 if (end - 1 > ceiling - 1)
77516 return;
77517
77518 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
77519 pud = pud_offset(pgd, start);
77520 pgd_clear(pgd);
77521 pud_free_tlb(tlb, pud, start);
77522 +#endif
77523 +
77524 }
77525
77526 /*
77527 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77528 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
77529 i = 0;
77530
77531 - do {
77532 + while (nr_pages) {
77533 struct vm_area_struct *vma;
77534
77535 - vma = find_extend_vma(mm, start);
77536 + vma = find_vma(mm, start);
77537 if (!vma && in_gate_area(tsk, start)) {
77538 unsigned long pg = start & PAGE_MASK;
77539 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
77540 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77541 continue;
77542 }
77543
77544 - if (!vma ||
77545 + if (!vma || start < vma->vm_start ||
77546 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
77547 !(vm_flags & vma->vm_flags))
77548 return i ? : -EFAULT;
77549 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77550 start += PAGE_SIZE;
77551 nr_pages--;
77552 } while (nr_pages && start < vma->vm_end);
77553 - } while (nr_pages);
77554 + }
77555 return i;
77556 }
77557
77558 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
77559 page_add_file_rmap(page);
77560 set_pte_at(mm, addr, pte, mk_pte(page, prot));
77561
77562 +#ifdef CONFIG_PAX_SEGMEXEC
77563 + pax_mirror_file_pte(vma, addr, page, ptl);
77564 +#endif
77565 +
77566 retval = 0;
77567 pte_unmap_unlock(pte, ptl);
77568 return retval;
77569 @@ -1560,10 +1571,22 @@ out:
77570 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
77571 struct page *page)
77572 {
77573 +
77574 +#ifdef CONFIG_PAX_SEGMEXEC
77575 + struct vm_area_struct *vma_m;
77576 +#endif
77577 +
77578 if (addr < vma->vm_start || addr >= vma->vm_end)
77579 return -EFAULT;
77580 if (!page_count(page))
77581 return -EINVAL;
77582 +
77583 +#ifdef CONFIG_PAX_SEGMEXEC
77584 + vma_m = pax_find_mirror_vma(vma);
77585 + if (vma_m)
77586 + vma_m->vm_flags |= VM_INSERTPAGE;
77587 +#endif
77588 +
77589 vma->vm_flags |= VM_INSERTPAGE;
77590 return insert_page(vma, addr, page, vma->vm_page_prot);
77591 }
77592 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77593 unsigned long pfn)
77594 {
77595 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77596 + BUG_ON(vma->vm_mirror);
77597
77598 if (addr < vma->vm_start || addr >= vma->vm_end)
77599 return -EFAULT;
77600 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77601 copy_user_highpage(dst, src, va, vma);
77602 }
77603
77604 +#ifdef CONFIG_PAX_SEGMEXEC
77605 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77606 +{
77607 + struct mm_struct *mm = vma->vm_mm;
77608 + spinlock_t *ptl;
77609 + pte_t *pte, entry;
77610 +
77611 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77612 + entry = *pte;
77613 + if (!pte_present(entry)) {
77614 + if (!pte_none(entry)) {
77615 + BUG_ON(pte_file(entry));
77616 + free_swap_and_cache(pte_to_swp_entry(entry));
77617 + pte_clear_not_present_full(mm, address, pte, 0);
77618 + }
77619 + } else {
77620 + struct page *page;
77621 +
77622 + flush_cache_page(vma, address, pte_pfn(entry));
77623 + entry = ptep_clear_flush(vma, address, pte);
77624 + BUG_ON(pte_dirty(entry));
77625 + page = vm_normal_page(vma, address, entry);
77626 + if (page) {
77627 + update_hiwater_rss(mm);
77628 + if (PageAnon(page))
77629 + dec_mm_counter(mm, anon_rss);
77630 + else
77631 + dec_mm_counter(mm, file_rss);
77632 + page_remove_rmap(page);
77633 + page_cache_release(page);
77634 + }
77635 + }
77636 + pte_unmap_unlock(pte, ptl);
77637 +}
77638 +
77639 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
77640 + *
77641 + * the ptl of the lower mapped page is held on entry and is not released on exit
77642 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77643 + */
77644 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77645 +{
77646 + struct mm_struct *mm = vma->vm_mm;
77647 + unsigned long address_m;
77648 + spinlock_t *ptl_m;
77649 + struct vm_area_struct *vma_m;
77650 + pmd_t *pmd_m;
77651 + pte_t *pte_m, entry_m;
77652 +
77653 + BUG_ON(!page_m || !PageAnon(page_m));
77654 +
77655 + vma_m = pax_find_mirror_vma(vma);
77656 + if (!vma_m)
77657 + return;
77658 +
77659 + BUG_ON(!PageLocked(page_m));
77660 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77661 + address_m = address + SEGMEXEC_TASK_SIZE;
77662 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77663 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77664 + ptl_m = pte_lockptr(mm, pmd_m);
77665 + if (ptl != ptl_m) {
77666 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77667 + if (!pte_none(*pte_m))
77668 + goto out;
77669 + }
77670 +
77671 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77672 + page_cache_get(page_m);
77673 + page_add_anon_rmap(page_m, vma_m, address_m);
77674 + inc_mm_counter(mm, anon_rss);
77675 + set_pte_at(mm, address_m, pte_m, entry_m);
77676 + update_mmu_cache(vma_m, address_m, entry_m);
77677 +out:
77678 + if (ptl != ptl_m)
77679 + spin_unlock(ptl_m);
77680 + pte_unmap_nested(pte_m);
77681 + unlock_page(page_m);
77682 +}
77683 +
77684 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77685 +{
77686 + struct mm_struct *mm = vma->vm_mm;
77687 + unsigned long address_m;
77688 + spinlock_t *ptl_m;
77689 + struct vm_area_struct *vma_m;
77690 + pmd_t *pmd_m;
77691 + pte_t *pte_m, entry_m;
77692 +
77693 + BUG_ON(!page_m || PageAnon(page_m));
77694 +
77695 + vma_m = pax_find_mirror_vma(vma);
77696 + if (!vma_m)
77697 + return;
77698 +
77699 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77700 + address_m = address + SEGMEXEC_TASK_SIZE;
77701 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77702 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77703 + ptl_m = pte_lockptr(mm, pmd_m);
77704 + if (ptl != ptl_m) {
77705 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77706 + if (!pte_none(*pte_m))
77707 + goto out;
77708 + }
77709 +
77710 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77711 + page_cache_get(page_m);
77712 + page_add_file_rmap(page_m);
77713 + inc_mm_counter(mm, file_rss);
77714 + set_pte_at(mm, address_m, pte_m, entry_m);
77715 + update_mmu_cache(vma_m, address_m, entry_m);
77716 +out:
77717 + if (ptl != ptl_m)
77718 + spin_unlock(ptl_m);
77719 + pte_unmap_nested(pte_m);
77720 +}
77721 +
77722 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77723 +{
77724 + struct mm_struct *mm = vma->vm_mm;
77725 + unsigned long address_m;
77726 + spinlock_t *ptl_m;
77727 + struct vm_area_struct *vma_m;
77728 + pmd_t *pmd_m;
77729 + pte_t *pte_m, entry_m;
77730 +
77731 + vma_m = pax_find_mirror_vma(vma);
77732 + if (!vma_m)
77733 + return;
77734 +
77735 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77736 + address_m = address + SEGMEXEC_TASK_SIZE;
77737 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77738 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77739 + ptl_m = pte_lockptr(mm, pmd_m);
77740 + if (ptl != ptl_m) {
77741 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77742 + if (!pte_none(*pte_m))
77743 + goto out;
77744 + }
77745 +
77746 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77747 + set_pte_at(mm, address_m, pte_m, entry_m);
77748 +out:
77749 + if (ptl != ptl_m)
77750 + spin_unlock(ptl_m);
77751 + pte_unmap_nested(pte_m);
77752 +}
77753 +
77754 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77755 +{
77756 + struct page *page_m;
77757 + pte_t entry;
77758 +
77759 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77760 + goto out;
77761 +
77762 + entry = *pte;
77763 + page_m = vm_normal_page(vma, address, entry);
77764 + if (!page_m)
77765 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77766 + else if (PageAnon(page_m)) {
77767 + if (pax_find_mirror_vma(vma)) {
77768 + pte_unmap_unlock(pte, ptl);
77769 + lock_page(page_m);
77770 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77771 + if (pte_same(entry, *pte))
77772 + pax_mirror_anon_pte(vma, address, page_m, ptl);
77773 + else
77774 + unlock_page(page_m);
77775 + }
77776 + } else
77777 + pax_mirror_file_pte(vma, address, page_m, ptl);
77778 +
77779 +out:
77780 + pte_unmap_unlock(pte, ptl);
77781 +}
77782 +#endif
77783 +
77784 /*
77785 * This routine handles present pages, when users try to write
77786 * to a shared page. It is done by copying the page to a new address
77787 @@ -2156,6 +2360,12 @@ gotten:
77788 */
77789 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77790 if (likely(pte_same(*page_table, orig_pte))) {
77791 +
77792 +#ifdef CONFIG_PAX_SEGMEXEC
77793 + if (pax_find_mirror_vma(vma))
77794 + BUG_ON(!trylock_page(new_page));
77795 +#endif
77796 +
77797 if (old_page) {
77798 if (!PageAnon(old_page)) {
77799 dec_mm_counter(mm, file_rss);
77800 @@ -2207,6 +2417,10 @@ gotten:
77801 page_remove_rmap(old_page);
77802 }
77803
77804 +#ifdef CONFIG_PAX_SEGMEXEC
77805 + pax_mirror_anon_pte(vma, address, new_page, ptl);
77806 +#endif
77807 +
77808 /* Free the old page.. */
77809 new_page = old_page;
77810 ret |= VM_FAULT_WRITE;
77811 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77812 swap_free(entry);
77813 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77814 try_to_free_swap(page);
77815 +
77816 +#ifdef CONFIG_PAX_SEGMEXEC
77817 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77818 +#endif
77819 +
77820 unlock_page(page);
77821
77822 if (flags & FAULT_FLAG_WRITE) {
77823 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77824
77825 /* No need to invalidate - it was non-present before */
77826 update_mmu_cache(vma, address, pte);
77827 +
77828 +#ifdef CONFIG_PAX_SEGMEXEC
77829 + pax_mirror_anon_pte(vma, address, page, ptl);
77830 +#endif
77831 +
77832 unlock:
77833 pte_unmap_unlock(page_table, ptl);
77834 out:
77835 @@ -2632,40 +2856,6 @@ out_release:
77836 }
77837
77838 /*
77839 - * This is like a special single-page "expand_{down|up}wards()",
77840 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
77841 - * doesn't hit another vma.
77842 - */
77843 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77844 -{
77845 - address &= PAGE_MASK;
77846 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77847 - struct vm_area_struct *prev = vma->vm_prev;
77848 -
77849 - /*
77850 - * Is there a mapping abutting this one below?
77851 - *
77852 - * That's only ok if it's the same stack mapping
77853 - * that has gotten split..
77854 - */
77855 - if (prev && prev->vm_end == address)
77856 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77857 -
77858 - expand_stack(vma, address - PAGE_SIZE);
77859 - }
77860 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77861 - struct vm_area_struct *next = vma->vm_next;
77862 -
77863 - /* As VM_GROWSDOWN but s/below/above/ */
77864 - if (next && next->vm_start == address + PAGE_SIZE)
77865 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77866 -
77867 - expand_upwards(vma, address + PAGE_SIZE);
77868 - }
77869 - return 0;
77870 -}
77871 -
77872 -/*
77873 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77874 * but allow concurrent faults), and pte mapped but not yet locked.
77875 * We return with mmap_sem still held, but pte unmapped and unlocked.
77876 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77877 unsigned long address, pte_t *page_table, pmd_t *pmd,
77878 unsigned int flags)
77879 {
77880 - struct page *page;
77881 + struct page *page = NULL;
77882 spinlock_t *ptl;
77883 pte_t entry;
77884
77885 - pte_unmap(page_table);
77886 -
77887 - /* Check if we need to add a guard page to the stack */
77888 - if (check_stack_guard_page(vma, address) < 0)
77889 - return VM_FAULT_SIGBUS;
77890 -
77891 - /* Use the zero-page for reads */
77892 if (!(flags & FAULT_FLAG_WRITE)) {
77893 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77894 vma->vm_page_prot));
77895 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77896 + ptl = pte_lockptr(mm, pmd);
77897 + spin_lock(ptl);
77898 if (!pte_none(*page_table))
77899 goto unlock;
77900 goto setpte;
77901 }
77902
77903 /* Allocate our own private page. */
77904 + pte_unmap(page_table);
77905 +
77906 if (unlikely(anon_vma_prepare(vma)))
77907 goto oom;
77908 page = alloc_zeroed_user_highpage_movable(vma, address);
77909 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77910 if (!pte_none(*page_table))
77911 goto release;
77912
77913 +#ifdef CONFIG_PAX_SEGMEXEC
77914 + if (pax_find_mirror_vma(vma))
77915 + BUG_ON(!trylock_page(page));
77916 +#endif
77917 +
77918 inc_mm_counter(mm, anon_rss);
77919 page_add_new_anon_rmap(page, vma, address);
77920 setpte:
77921 @@ -2720,6 +2911,12 @@ setpte:
77922
77923 /* No need to invalidate - it was non-present before */
77924 update_mmu_cache(vma, address, entry);
77925 +
77926 +#ifdef CONFIG_PAX_SEGMEXEC
77927 + if (page)
77928 + pax_mirror_anon_pte(vma, address, page, ptl);
77929 +#endif
77930 +
77931 unlock:
77932 pte_unmap_unlock(page_table, ptl);
77933 return 0;
77934 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77935 */
77936 /* Only go through if we didn't race with anybody else... */
77937 if (likely(pte_same(*page_table, orig_pte))) {
77938 +
77939 +#ifdef CONFIG_PAX_SEGMEXEC
77940 + if (anon && pax_find_mirror_vma(vma))
77941 + BUG_ON(!trylock_page(page));
77942 +#endif
77943 +
77944 flush_icache_page(vma, page);
77945 entry = mk_pte(page, vma->vm_page_prot);
77946 if (flags & FAULT_FLAG_WRITE)
77947 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77948
77949 /* no need to invalidate: a not-present page won't be cached */
77950 update_mmu_cache(vma, address, entry);
77951 +
77952 +#ifdef CONFIG_PAX_SEGMEXEC
77953 + if (anon)
77954 + pax_mirror_anon_pte(vma, address, page, ptl);
77955 + else
77956 + pax_mirror_file_pte(vma, address, page, ptl);
77957 +#endif
77958 +
77959 } else {
77960 if (charged)
77961 mem_cgroup_uncharge_page(page);
77962 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77963 if (flags & FAULT_FLAG_WRITE)
77964 flush_tlb_page(vma, address);
77965 }
77966 +
77967 +#ifdef CONFIG_PAX_SEGMEXEC
77968 + pax_mirror_pte(vma, address, pte, pmd, ptl);
77969 + return 0;
77970 +#endif
77971 +
77972 unlock:
77973 pte_unmap_unlock(pte, ptl);
77974 return 0;
77975 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77976 pmd_t *pmd;
77977 pte_t *pte;
77978
77979 +#ifdef CONFIG_PAX_SEGMEXEC
77980 + struct vm_area_struct *vma_m;
77981 +#endif
77982 +
77983 __set_current_state(TASK_RUNNING);
77984
77985 count_vm_event(PGFAULT);
77986 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77987 if (unlikely(is_vm_hugetlb_page(vma)))
77988 return hugetlb_fault(mm, vma, address, flags);
77989
77990 +#ifdef CONFIG_PAX_SEGMEXEC
77991 + vma_m = pax_find_mirror_vma(vma);
77992 + if (vma_m) {
77993 + unsigned long address_m;
77994 + pgd_t *pgd_m;
77995 + pud_t *pud_m;
77996 + pmd_t *pmd_m;
77997 +
77998 + if (vma->vm_start > vma_m->vm_start) {
77999 + address_m = address;
78000 + address -= SEGMEXEC_TASK_SIZE;
78001 + vma = vma_m;
78002 + } else
78003 + address_m = address + SEGMEXEC_TASK_SIZE;
78004 +
78005 + pgd_m = pgd_offset(mm, address_m);
78006 + pud_m = pud_alloc(mm, pgd_m, address_m);
78007 + if (!pud_m)
78008 + return VM_FAULT_OOM;
78009 + pmd_m = pmd_alloc(mm, pud_m, address_m);
78010 + if (!pmd_m)
78011 + return VM_FAULT_OOM;
78012 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
78013 + return VM_FAULT_OOM;
78014 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
78015 + }
78016 +#endif
78017 +
78018 pgd = pgd_offset(mm, address);
78019 pud = pud_alloc(mm, pgd, address);
78020 if (!pud)
78021 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
78022 gate_vma.vm_start = FIXADDR_USER_START;
78023 gate_vma.vm_end = FIXADDR_USER_END;
78024 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
78025 - gate_vma.vm_page_prot = __P101;
78026 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
78027 /*
78028 * Make sure the vDSO gets into every core dump.
78029 * Dumping its contents makes post-mortem fully interpretable later
78030 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
78031 index 3c6e3e2..b1ddbb8 100644
78032 --- a/mm/mempolicy.c
78033 +++ b/mm/mempolicy.c
78034 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
78035 struct vm_area_struct *next;
78036 int err;
78037
78038 +#ifdef CONFIG_PAX_SEGMEXEC
78039 + struct vm_area_struct *vma_m;
78040 +#endif
78041 +
78042 err = 0;
78043 for (; vma && vma->vm_start < end; vma = next) {
78044 next = vma->vm_next;
78045 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
78046 err = policy_vma(vma, new);
78047 if (err)
78048 break;
78049 +
78050 +#ifdef CONFIG_PAX_SEGMEXEC
78051 + vma_m = pax_find_mirror_vma(vma);
78052 + if (vma_m) {
78053 + err = policy_vma(vma_m, new);
78054 + if (err)
78055 + break;
78056 + }
78057 +#endif
78058 +
78059 }
78060 return err;
78061 }
78062 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
78063
78064 if (end < start)
78065 return -EINVAL;
78066 +
78067 +#ifdef CONFIG_PAX_SEGMEXEC
78068 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78069 + if (end > SEGMEXEC_TASK_SIZE)
78070 + return -EINVAL;
78071 + } else
78072 +#endif
78073 +
78074 + if (end > TASK_SIZE)
78075 + return -EINVAL;
78076 +
78077 if (end == start)
78078 return 0;
78079
78080 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78081 if (!mm)
78082 return -EINVAL;
78083
78084 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78085 + if (mm != current->mm &&
78086 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78087 + err = -EPERM;
78088 + goto out;
78089 + }
78090 +#endif
78091 +
78092 /*
78093 * Check if this process has the right to modify the specified
78094 * process. The right exists if the process has administrative
78095 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78096 rcu_read_lock();
78097 tcred = __task_cred(task);
78098 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78099 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
78100 - !capable(CAP_SYS_NICE)) {
78101 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78102 rcu_read_unlock();
78103 err = -EPERM;
78104 goto out;
78105 @@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
78106 }
78107 #endif
78108
78109 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78110 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
78111 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
78112 + _mm->pax_flags & MF_PAX_SEGMEXEC))
78113 +#endif
78114 +
78115 /*
78116 * Display pages allocated per node and memory policy via /proc.
78117 */
78118 @@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
78119 int n;
78120 char buffer[50];
78121
78122 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78123 + if (current->exec_id != m->exec_id) {
78124 + gr_log_badprocpid("numa_maps");
78125 + return 0;
78126 + }
78127 +#endif
78128 +
78129 if (!mm)
78130 return 0;
78131
78132 @@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
78133 mpol_to_str(buffer, sizeof(buffer), pol, 0);
78134 mpol_cond_put(pol);
78135
78136 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78137 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
78138 +#else
78139 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
78140 +#endif
78141
78142 if (file) {
78143 seq_printf(m, " file=");
78144 - seq_path(m, &file->f_path, "\n\t= ");
78145 + seq_path(m, &file->f_path, "\n\t\\= ");
78146 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
78147 seq_printf(m, " heap");
78148 } else if (vma->vm_start <= mm->start_stack &&
78149 diff --git a/mm/migrate.c b/mm/migrate.c
78150 index aaca868..2ebecdc 100644
78151 --- a/mm/migrate.c
78152 +++ b/mm/migrate.c
78153 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
78154 unsigned long chunk_start;
78155 int err;
78156
78157 + pax_track_stack();
78158 +
78159 task_nodes = cpuset_mems_allowed(task);
78160
78161 err = -ENOMEM;
78162 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78163 if (!mm)
78164 return -EINVAL;
78165
78166 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78167 + if (mm != current->mm &&
78168 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78169 + err = -EPERM;
78170 + goto out;
78171 + }
78172 +#endif
78173 +
78174 /*
78175 * Check if this process has the right to modify the specified
78176 * process. The right exists if the process has administrative
78177 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78178 rcu_read_lock();
78179 tcred = __task_cred(task);
78180 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78181 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
78182 - !capable(CAP_SYS_NICE)) {
78183 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78184 rcu_read_unlock();
78185 err = -EPERM;
78186 goto out;
78187 diff --git a/mm/mlock.c b/mm/mlock.c
78188 index 2d846cf..98134d2 100644
78189 --- a/mm/mlock.c
78190 +++ b/mm/mlock.c
78191 @@ -13,6 +13,7 @@
78192 #include <linux/pagemap.h>
78193 #include <linux/mempolicy.h>
78194 #include <linux/syscalls.h>
78195 +#include <linux/security.h>
78196 #include <linux/sched.h>
78197 #include <linux/module.h>
78198 #include <linux/rmap.h>
78199 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
78200 }
78201 }
78202
78203 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
78204 -{
78205 - return (vma->vm_flags & VM_GROWSDOWN) &&
78206 - (vma->vm_start == addr) &&
78207 - !vma_stack_continue(vma->vm_prev, addr);
78208 -}
78209 -
78210 /**
78211 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
78212 * @vma: target vma
78213 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
78214 if (vma->vm_flags & VM_WRITE)
78215 gup_flags |= FOLL_WRITE;
78216
78217 - /* We don't try to access the guard page of a stack vma */
78218 - if (stack_guard_page(vma, start)) {
78219 - addr += PAGE_SIZE;
78220 - nr_pages--;
78221 - }
78222 -
78223 while (nr_pages > 0) {
78224 int i;
78225
78226 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
78227 {
78228 unsigned long nstart, end, tmp;
78229 struct vm_area_struct * vma, * prev;
78230 - int error;
78231 + int error = -EINVAL;
78232
78233 len = PAGE_ALIGN(len);
78234 end = start + len;
78235 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
78236 return -EINVAL;
78237 if (end == start)
78238 return 0;
78239 + if (end > TASK_SIZE)
78240 + return -EINVAL;
78241 +
78242 vma = find_vma_prev(current->mm, start, &prev);
78243 if (!vma || vma->vm_start > start)
78244 return -ENOMEM;
78245 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
78246 for (nstart = start ; ; ) {
78247 unsigned int newflags;
78248
78249 +#ifdef CONFIG_PAX_SEGMEXEC
78250 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78251 + break;
78252 +#endif
78253 +
78254 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
78255
78256 newflags = vma->vm_flags | VM_LOCKED;
78257 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
78258 lock_limit >>= PAGE_SHIFT;
78259
78260 /* check against resource limits */
78261 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
78262 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78263 error = do_mlock(start, len, 1);
78264 up_write(&current->mm->mmap_sem);
78265 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78266 static int do_mlockall(int flags)
78267 {
78268 struct vm_area_struct * vma, * prev = NULL;
78269 - unsigned int def_flags = 0;
78270
78271 if (flags & MCL_FUTURE)
78272 - def_flags = VM_LOCKED;
78273 - current->mm->def_flags = def_flags;
78274 + current->mm->def_flags |= VM_LOCKED;
78275 + else
78276 + current->mm->def_flags &= ~VM_LOCKED;
78277 if (flags == MCL_FUTURE)
78278 goto out;
78279
78280 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78281 - unsigned int newflags;
78282 + unsigned long newflags;
78283
78284 +#ifdef CONFIG_PAX_SEGMEXEC
78285 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78286 + break;
78287 +#endif
78288 +
78289 + BUG_ON(vma->vm_end > TASK_SIZE);
78290 newflags = vma->vm_flags | VM_LOCKED;
78291 if (!(flags & MCL_CURRENT))
78292 newflags &= ~VM_LOCKED;
78293 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78294 lock_limit >>= PAGE_SHIFT;
78295
78296 ret = -ENOMEM;
78297 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78298 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78299 capable(CAP_IPC_LOCK))
78300 ret = do_mlockall(flags);
78301 diff --git a/mm/mmap.c b/mm/mmap.c
78302 index 4b80cbf..12a7861 100644
78303 --- a/mm/mmap.c
78304 +++ b/mm/mmap.c
78305 @@ -45,6 +45,16 @@
78306 #define arch_rebalance_pgtables(addr, len) (addr)
78307 #endif
78308
78309 +static inline void verify_mm_writelocked(struct mm_struct *mm)
78310 +{
78311 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78312 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78313 + up_read(&mm->mmap_sem);
78314 + BUG();
78315 + }
78316 +#endif
78317 +}
78318 +
78319 static void unmap_region(struct mm_struct *mm,
78320 struct vm_area_struct *vma, struct vm_area_struct *prev,
78321 unsigned long start, unsigned long end);
78322 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78323 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78324 *
78325 */
78326 -pgprot_t protection_map[16] = {
78327 +pgprot_t protection_map[16] __read_only = {
78328 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78329 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78330 };
78331
78332 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78333 {
78334 - return __pgprot(pgprot_val(protection_map[vm_flags &
78335 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78336 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78337 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78338 +
78339 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78340 + if (!nx_enabled &&
78341 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78342 + (vm_flags & (VM_READ | VM_WRITE)))
78343 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78344 +#endif
78345 +
78346 + return prot;
78347 }
78348 EXPORT_SYMBOL(vm_get_page_prot);
78349
78350 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78351 int sysctl_overcommit_ratio = 50; /* default is 50% */
78352 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78353 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78354 struct percpu_counter vm_committed_as;
78355
78356 /*
78357 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78358 struct vm_area_struct *next = vma->vm_next;
78359
78360 might_sleep();
78361 + BUG_ON(vma->vm_mirror);
78362 if (vma->vm_ops && vma->vm_ops->close)
78363 vma->vm_ops->close(vma);
78364 if (vma->vm_file) {
78365 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78366 * not page aligned -Ram Gupta
78367 */
78368 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78369 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78370 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78371 (mm->end_data - mm->start_data) > rlim)
78372 goto out;
78373 @@ -704,6 +726,12 @@ static int
78374 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78375 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78376 {
78377 +
78378 +#ifdef CONFIG_PAX_SEGMEXEC
78379 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78380 + return 0;
78381 +#endif
78382 +
78383 if (is_mergeable_vma(vma, file, vm_flags) &&
78384 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78385 if (vma->vm_pgoff == vm_pgoff)
78386 @@ -723,6 +751,12 @@ static int
78387 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78388 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78389 {
78390 +
78391 +#ifdef CONFIG_PAX_SEGMEXEC
78392 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78393 + return 0;
78394 +#endif
78395 +
78396 if (is_mergeable_vma(vma, file, vm_flags) &&
78397 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78398 pgoff_t vm_pglen;
78399 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78400 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78401 struct vm_area_struct *prev, unsigned long addr,
78402 unsigned long end, unsigned long vm_flags,
78403 - struct anon_vma *anon_vma, struct file *file,
78404 + struct anon_vma *anon_vma, struct file *file,
78405 pgoff_t pgoff, struct mempolicy *policy)
78406 {
78407 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78408 struct vm_area_struct *area, *next;
78409
78410 +#ifdef CONFIG_PAX_SEGMEXEC
78411 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78412 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78413 +
78414 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78415 +#endif
78416 +
78417 /*
78418 * We later require that vma->vm_flags == vm_flags,
78419 * so this tests vma->vm_flags & VM_SPECIAL, too.
78420 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78421 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78422 next = next->vm_next;
78423
78424 +#ifdef CONFIG_PAX_SEGMEXEC
78425 + if (prev)
78426 + prev_m = pax_find_mirror_vma(prev);
78427 + if (area)
78428 + area_m = pax_find_mirror_vma(area);
78429 + if (next)
78430 + next_m = pax_find_mirror_vma(next);
78431 +#endif
78432 +
78433 /*
78434 * Can it merge with the predecessor?
78435 */
78436 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78437 /* cases 1, 6 */
78438 vma_adjust(prev, prev->vm_start,
78439 next->vm_end, prev->vm_pgoff, NULL);
78440 - } else /* cases 2, 5, 7 */
78441 +
78442 +#ifdef CONFIG_PAX_SEGMEXEC
78443 + if (prev_m)
78444 + vma_adjust(prev_m, prev_m->vm_start,
78445 + next_m->vm_end, prev_m->vm_pgoff, NULL);
78446 +#endif
78447 +
78448 + } else { /* cases 2, 5, 7 */
78449 vma_adjust(prev, prev->vm_start,
78450 end, prev->vm_pgoff, NULL);
78451 +
78452 +#ifdef CONFIG_PAX_SEGMEXEC
78453 + if (prev_m)
78454 + vma_adjust(prev_m, prev_m->vm_start,
78455 + end_m, prev_m->vm_pgoff, NULL);
78456 +#endif
78457 +
78458 + }
78459 return prev;
78460 }
78461
78462 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78463 mpol_equal(policy, vma_policy(next)) &&
78464 can_vma_merge_before(next, vm_flags,
78465 anon_vma, file, pgoff+pglen)) {
78466 - if (prev && addr < prev->vm_end) /* case 4 */
78467 + if (prev && addr < prev->vm_end) { /* case 4 */
78468 vma_adjust(prev, prev->vm_start,
78469 addr, prev->vm_pgoff, NULL);
78470 - else /* cases 3, 8 */
78471 +
78472 +#ifdef CONFIG_PAX_SEGMEXEC
78473 + if (prev_m)
78474 + vma_adjust(prev_m, prev_m->vm_start,
78475 + addr_m, prev_m->vm_pgoff, NULL);
78476 +#endif
78477 +
78478 + } else { /* cases 3, 8 */
78479 vma_adjust(area, addr, next->vm_end,
78480 next->vm_pgoff - pglen, NULL);
78481 +
78482 +#ifdef CONFIG_PAX_SEGMEXEC
78483 + if (area_m)
78484 + vma_adjust(area_m, addr_m, next_m->vm_end,
78485 + next_m->vm_pgoff - pglen, NULL);
78486 +#endif
78487 +
78488 + }
78489 return area;
78490 }
78491
78492 @@ -898,14 +978,11 @@ none:
78493 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
78494 struct file *file, long pages)
78495 {
78496 - const unsigned long stack_flags
78497 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
78498 -
78499 if (file) {
78500 mm->shared_vm += pages;
78501 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
78502 mm->exec_vm += pages;
78503 - } else if (flags & stack_flags)
78504 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
78505 mm->stack_vm += pages;
78506 if (flags & (VM_RESERVED|VM_IO))
78507 mm->reserved_vm += pages;
78508 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78509 * (the exception is when the underlying filesystem is noexec
78510 * mounted, in which case we dont add PROT_EXEC.)
78511 */
78512 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78513 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78514 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
78515 prot |= PROT_EXEC;
78516
78517 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78518 /* Obtain the address to map to. we verify (or select) it and ensure
78519 * that it represents a valid section of the address space.
78520 */
78521 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
78522 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
78523 if (addr & ~PAGE_MASK)
78524 return addr;
78525
78526 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78527 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
78528 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
78529
78530 +#ifdef CONFIG_PAX_MPROTECT
78531 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78532 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78533 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
78534 + gr_log_rwxmmap(file);
78535 +
78536 +#ifdef CONFIG_PAX_EMUPLT
78537 + vm_flags &= ~VM_EXEC;
78538 +#else
78539 + return -EPERM;
78540 +#endif
78541 +
78542 + }
78543 +
78544 + if (!(vm_flags & VM_EXEC))
78545 + vm_flags &= ~VM_MAYEXEC;
78546 +#else
78547 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78548 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78549 +#endif
78550 + else
78551 + vm_flags &= ~VM_MAYWRITE;
78552 + }
78553 +#endif
78554 +
78555 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78556 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
78557 + vm_flags &= ~VM_PAGEEXEC;
78558 +#endif
78559 +
78560 if (flags & MAP_LOCKED)
78561 if (!can_do_mlock())
78562 return -EPERM;
78563 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78564 locked += mm->locked_vm;
78565 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78566 lock_limit >>= PAGE_SHIFT;
78567 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78568 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
78569 return -EAGAIN;
78570 }
78571 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78572 if (error)
78573 return error;
78574
78575 + if (!gr_acl_handle_mmap(file, prot))
78576 + return -EACCES;
78577 +
78578 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78579 }
78580 EXPORT_SYMBOL(do_mmap_pgoff);
78581 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78582 */
78583 int vma_wants_writenotify(struct vm_area_struct *vma)
78584 {
78585 - unsigned int vm_flags = vma->vm_flags;
78586 + unsigned long vm_flags = vma->vm_flags;
78587
78588 /* If it was private or non-writable, the write bit is already clear */
78589 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78590 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78591 return 0;
78592
78593 /* The backer wishes to know when pages are first written to? */
78594 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78595 unsigned long charged = 0;
78596 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78597
78598 +#ifdef CONFIG_PAX_SEGMEXEC
78599 + struct vm_area_struct *vma_m = NULL;
78600 +#endif
78601 +
78602 + /*
78603 + * mm->mmap_sem is required to protect against another thread
78604 + * changing the mappings in case we sleep.
78605 + */
78606 + verify_mm_writelocked(mm);
78607 +
78608 /* Clear old maps */
78609 error = -ENOMEM;
78610 -munmap_back:
78611 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78612 if (vma && vma->vm_start < addr + len) {
78613 if (do_munmap(mm, addr, len))
78614 return -ENOMEM;
78615 - goto munmap_back;
78616 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78617 + BUG_ON(vma && vma->vm_start < addr + len);
78618 }
78619
78620 /* Check against address space limit. */
78621 @@ -1173,6 +1294,16 @@ munmap_back:
78622 goto unacct_error;
78623 }
78624
78625 +#ifdef CONFIG_PAX_SEGMEXEC
78626 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78627 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78628 + if (!vma_m) {
78629 + error = -ENOMEM;
78630 + goto free_vma;
78631 + }
78632 + }
78633 +#endif
78634 +
78635 vma->vm_mm = mm;
78636 vma->vm_start = addr;
78637 vma->vm_end = addr + len;
78638 @@ -1180,8 +1311,9 @@ munmap_back:
78639 vma->vm_page_prot = vm_get_page_prot(vm_flags);
78640 vma->vm_pgoff = pgoff;
78641
78642 + error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
78643 +
78644 if (file) {
78645 - error = -EINVAL;
78646 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
78647 goto free_vma;
78648 if (vm_flags & VM_DENYWRITE) {
78649 @@ -1195,6 +1327,19 @@ munmap_back:
78650 error = file->f_op->mmap(file, vma);
78651 if (error)
78652 goto unmap_and_free_vma;
78653 +
78654 +#ifdef CONFIG_PAX_SEGMEXEC
78655 + if (vma_m && (vm_flags & VM_EXECUTABLE))
78656 + added_exe_file_vma(mm);
78657 +#endif
78658 +
78659 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78660 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78661 + vma->vm_flags |= VM_PAGEEXEC;
78662 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78663 + }
78664 +#endif
78665 +
78666 if (vm_flags & VM_EXECUTABLE)
78667 added_exe_file_vma(mm);
78668
78669 @@ -1207,6 +1352,8 @@ munmap_back:
78670 pgoff = vma->vm_pgoff;
78671 vm_flags = vma->vm_flags;
78672 } else if (vm_flags & VM_SHARED) {
78673 + if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
78674 + goto free_vma;
78675 error = shmem_zero_setup(vma);
78676 if (error)
78677 goto free_vma;
78678 @@ -1218,6 +1365,11 @@ munmap_back:
78679 vma_link(mm, vma, prev, rb_link, rb_parent);
78680 file = vma->vm_file;
78681
78682 +#ifdef CONFIG_PAX_SEGMEXEC
78683 + if (vma_m)
78684 + pax_mirror_vma(vma_m, vma);
78685 +#endif
78686 +
78687 /* Once vma denies write, undo our temporary denial count */
78688 if (correct_wcount)
78689 atomic_inc(&inode->i_writecount);
78690 @@ -1226,6 +1378,7 @@ out:
78691
78692 mm->total_vm += len >> PAGE_SHIFT;
78693 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78694 + track_exec_limit(mm, addr, addr + len, vm_flags);
78695 if (vm_flags & VM_LOCKED) {
78696 /*
78697 * makes pages present; downgrades, drops, reacquires mmap_sem
78698 @@ -1248,6 +1401,12 @@ unmap_and_free_vma:
78699 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78700 charged = 0;
78701 free_vma:
78702 +
78703 +#ifdef CONFIG_PAX_SEGMEXEC
78704 + if (vma_m)
78705 + kmem_cache_free(vm_area_cachep, vma_m);
78706 +#endif
78707 +
78708 kmem_cache_free(vm_area_cachep, vma);
78709 unacct_error:
78710 if (charged)
78711 @@ -1255,6 +1414,44 @@ unacct_error:
78712 return error;
78713 }
78714
78715 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78716 +{
78717 + if (!vma) {
78718 +#ifdef CONFIG_STACK_GROWSUP
78719 + if (addr > sysctl_heap_stack_gap)
78720 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78721 + else
78722 + vma = find_vma(current->mm, 0);
78723 + if (vma && (vma->vm_flags & VM_GROWSUP))
78724 + return false;
78725 +#endif
78726 + return true;
78727 + }
78728 +
78729 + if (addr + len > vma->vm_start)
78730 + return false;
78731 +
78732 + if (vma->vm_flags & VM_GROWSDOWN)
78733 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78734 +#ifdef CONFIG_STACK_GROWSUP
78735 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78736 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78737 +#endif
78738 +
78739 + return true;
78740 +}
78741 +
78742 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78743 +{
78744 + if (vma->vm_start < len)
78745 + return -ENOMEM;
78746 + if (!(vma->vm_flags & VM_GROWSDOWN))
78747 + return vma->vm_start - len;
78748 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
78749 + return vma->vm_start - len - sysctl_heap_stack_gap;
78750 + return -ENOMEM;
78751 +}
78752 +
78753 /* Get an address range which is currently unmapped.
78754 * For shmat() with addr=0.
78755 *
78756 @@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78757 if (flags & MAP_FIXED)
78758 return addr;
78759
78760 +#ifdef CONFIG_PAX_RANDMMAP
78761 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78762 +#endif
78763 +
78764 if (addr) {
78765 addr = PAGE_ALIGN(addr);
78766 - vma = find_vma(mm, addr);
78767 - if (TASK_SIZE - len >= addr &&
78768 - (!vma || addr + len <= vma->vm_start))
78769 - return addr;
78770 + if (TASK_SIZE - len >= addr) {
78771 + vma = find_vma(mm, addr);
78772 + if (check_heap_stack_gap(vma, addr, len))
78773 + return addr;
78774 + }
78775 }
78776 if (len > mm->cached_hole_size) {
78777 - start_addr = addr = mm->free_area_cache;
78778 + start_addr = addr = mm->free_area_cache;
78779 } else {
78780 - start_addr = addr = TASK_UNMAPPED_BASE;
78781 - mm->cached_hole_size = 0;
78782 + start_addr = addr = mm->mmap_base;
78783 + mm->cached_hole_size = 0;
78784 }
78785
78786 full_search:
78787 @@ -1303,34 +1505,40 @@ full_search:
78788 * Start a new search - just in case we missed
78789 * some holes.
78790 */
78791 - if (start_addr != TASK_UNMAPPED_BASE) {
78792 - addr = TASK_UNMAPPED_BASE;
78793 - start_addr = addr;
78794 + if (start_addr != mm->mmap_base) {
78795 + start_addr = addr = mm->mmap_base;
78796 mm->cached_hole_size = 0;
78797 goto full_search;
78798 }
78799 return -ENOMEM;
78800 }
78801 - if (!vma || addr + len <= vma->vm_start) {
78802 - /*
78803 - * Remember the place where we stopped the search:
78804 - */
78805 - mm->free_area_cache = addr + len;
78806 - return addr;
78807 - }
78808 + if (check_heap_stack_gap(vma, addr, len))
78809 + break;
78810 if (addr + mm->cached_hole_size < vma->vm_start)
78811 mm->cached_hole_size = vma->vm_start - addr;
78812 addr = vma->vm_end;
78813 }
78814 +
78815 + /*
78816 + * Remember the place where we stopped the search:
78817 + */
78818 + mm->free_area_cache = addr + len;
78819 + return addr;
78820 }
78821 #endif
78822
78823 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78824 {
78825 +
78826 +#ifdef CONFIG_PAX_SEGMEXEC
78827 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78828 + return;
78829 +#endif
78830 +
78831 /*
78832 * Is this a new hole at the lowest possible address?
78833 */
78834 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78835 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78836 mm->free_area_cache = addr;
78837 mm->cached_hole_size = ~0UL;
78838 }
78839 @@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78840 {
78841 struct vm_area_struct *vma;
78842 struct mm_struct *mm = current->mm;
78843 - unsigned long addr = addr0;
78844 + unsigned long base = mm->mmap_base, addr = addr0;
78845
78846 /* requested length too big for entire address space */
78847 if (len > TASK_SIZE)
78848 @@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78849 if (flags & MAP_FIXED)
78850 return addr;
78851
78852 +#ifdef CONFIG_PAX_RANDMMAP
78853 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78854 +#endif
78855 +
78856 /* requesting a specific address */
78857 if (addr) {
78858 addr = PAGE_ALIGN(addr);
78859 - vma = find_vma(mm, addr);
78860 - if (TASK_SIZE - len >= addr &&
78861 - (!vma || addr + len <= vma->vm_start))
78862 - return addr;
78863 + if (TASK_SIZE - len >= addr) {
78864 + vma = find_vma(mm, addr);
78865 + if (check_heap_stack_gap(vma, addr, len))
78866 + return addr;
78867 + }
78868 }
78869
78870 /* check if free_area_cache is useful for us */
78871 @@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78872 /* make sure it can fit in the remaining address space */
78873 if (addr > len) {
78874 vma = find_vma(mm, addr-len);
78875 - if (!vma || addr <= vma->vm_start)
78876 + if (check_heap_stack_gap(vma, addr - len, len))
78877 /* remember the address as a hint for next time */
78878 return (mm->free_area_cache = addr-len);
78879 }
78880 @@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78881 * return with success:
78882 */
78883 vma = find_vma(mm, addr);
78884 - if (!vma || addr+len <= vma->vm_start)
78885 + if (check_heap_stack_gap(vma, addr, len))
78886 /* remember the address as a hint for next time */
78887 return (mm->free_area_cache = addr);
78888
78889 @@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78890 mm->cached_hole_size = vma->vm_start - addr;
78891
78892 /* try just below the current vma->vm_start */
78893 - addr = vma->vm_start-len;
78894 - } while (len < vma->vm_start);
78895 + addr = skip_heap_stack_gap(vma, len);
78896 + } while (!IS_ERR_VALUE(addr));
78897
78898 bottomup:
78899 /*
78900 @@ -1414,13 +1627,21 @@ bottomup:
78901 * can happen with large stack limits and large mmap()
78902 * allocations.
78903 */
78904 + mm->mmap_base = TASK_UNMAPPED_BASE;
78905 +
78906 +#ifdef CONFIG_PAX_RANDMMAP
78907 + if (mm->pax_flags & MF_PAX_RANDMMAP)
78908 + mm->mmap_base += mm->delta_mmap;
78909 +#endif
78910 +
78911 + mm->free_area_cache = mm->mmap_base;
78912 mm->cached_hole_size = ~0UL;
78913 - mm->free_area_cache = TASK_UNMAPPED_BASE;
78914 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78915 /*
78916 * Restore the topdown base:
78917 */
78918 - mm->free_area_cache = mm->mmap_base;
78919 + mm->mmap_base = base;
78920 + mm->free_area_cache = base;
78921 mm->cached_hole_size = ~0UL;
78922
78923 return addr;
78924 @@ -1429,6 +1650,12 @@ bottomup:
78925
78926 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78927 {
78928 +
78929 +#ifdef CONFIG_PAX_SEGMEXEC
78930 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78931 + return;
78932 +#endif
78933 +
78934 /*
78935 * Is this a new hole at the highest possible address?
78936 */
78937 @@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78938 mm->free_area_cache = addr;
78939
78940 /* dont allow allocations above current base */
78941 - if (mm->free_area_cache > mm->mmap_base)
78942 + if (mm->free_area_cache > mm->mmap_base) {
78943 mm->free_area_cache = mm->mmap_base;
78944 + mm->cached_hole_size = ~0UL;
78945 + }
78946 }
78947
78948 unsigned long
78949 @@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78950
78951 EXPORT_SYMBOL(find_vma);
78952
78953 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
78954 +/*
78955 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
78956 + */
78957 struct vm_area_struct *
78958 find_vma_prev(struct mm_struct *mm, unsigned long addr,
78959 struct vm_area_struct **pprev)
78960 {
78961 - struct vm_area_struct *vma = NULL, *prev = NULL;
78962 - struct rb_node *rb_node;
78963 - if (!mm)
78964 - goto out;
78965 -
78966 - /* Guard against addr being lower than the first VMA */
78967 - vma = mm->mmap;
78968 -
78969 - /* Go through the RB tree quickly. */
78970 - rb_node = mm->mm_rb.rb_node;
78971 -
78972 - while (rb_node) {
78973 - struct vm_area_struct *vma_tmp;
78974 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
78975 -
78976 - if (addr < vma_tmp->vm_end) {
78977 - rb_node = rb_node->rb_left;
78978 - } else {
78979 - prev = vma_tmp;
78980 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
78981 - break;
78982 + struct vm_area_struct *vma;
78983 +
78984 + vma = find_vma(mm, addr);
78985 + if (vma) {
78986 + *pprev = vma->vm_prev;
78987 + } else {
78988 + struct rb_node *rb_node = mm->mm_rb.rb_node;
78989 + *pprev = NULL;
78990 + while (rb_node) {
78991 + *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
78992 rb_node = rb_node->rb_right;
78993 }
78994 }
78995 + return vma;
78996 +}
78997 +
78998 +#ifdef CONFIG_PAX_SEGMEXEC
78999 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
79000 +{
79001 + struct vm_area_struct *vma_m;
79002
79003 -out:
79004 - *pprev = prev;
79005 - return prev ? prev->vm_next : vma;
79006 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
79007 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
79008 + BUG_ON(vma->vm_mirror);
79009 + return NULL;
79010 + }
79011 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
79012 + vma_m = vma->vm_mirror;
79013 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
79014 + BUG_ON(vma->vm_file != vma_m->vm_file);
79015 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
79016 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
79017 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
79018 + return vma_m;
79019 }
79020 +#endif
79021
79022 /*
79023 * Verify that the stack growth is acceptable and
79024 @@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79025 return -ENOMEM;
79026
79027 /* Stack limit test */
79028 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
79029 if (size > rlim[RLIMIT_STACK].rlim_cur)
79030 return -ENOMEM;
79031
79032 @@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79033 unsigned long limit;
79034 locked = mm->locked_vm + grow;
79035 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
79036 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
79037 if (locked > limit && !capable(CAP_IPC_LOCK))
79038 return -ENOMEM;
79039 }
79040 @@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79041 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
79042 * vma is the last one with address > vma->vm_end. Have to extend vma.
79043 */
79044 +#ifndef CONFIG_IA64
79045 +static
79046 +#endif
79047 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79048 {
79049 int error;
79050 + bool locknext;
79051
79052 if (!(vma->vm_flags & VM_GROWSUP))
79053 return -EFAULT;
79054
79055 + /* Also guard against wrapping around to address 0. */
79056 + if (address < PAGE_ALIGN(address+1))
79057 + address = PAGE_ALIGN(address+1);
79058 + else
79059 + return -ENOMEM;
79060 +
79061 /*
79062 * We must make sure the anon_vma is allocated
79063 * so that the anon_vma locking is not a noop.
79064 */
79065 if (unlikely(anon_vma_prepare(vma)))
79066 return -ENOMEM;
79067 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
79068 + if (locknext && anon_vma_prepare(vma->vm_next))
79069 + return -ENOMEM;
79070 anon_vma_lock(vma);
79071 + if (locknext)
79072 + anon_vma_lock(vma->vm_next);
79073
79074 /*
79075 * vma->vm_start/vm_end cannot change under us because the caller
79076 * is required to hold the mmap_sem in read mode. We need the
79077 - * anon_vma lock to serialize against concurrent expand_stacks.
79078 - * Also guard against wrapping around to address 0.
79079 + * anon_vma locks to serialize against concurrent expand_stacks
79080 + * and expand_upwards.
79081 */
79082 - if (address < PAGE_ALIGN(address+4))
79083 - address = PAGE_ALIGN(address+4);
79084 - else {
79085 - anon_vma_unlock(vma);
79086 - return -ENOMEM;
79087 - }
79088 error = 0;
79089
79090 /* Somebody else might have raced and expanded it already */
79091 - if (address > vma->vm_end) {
79092 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
79093 + error = -ENOMEM;
79094 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
79095 unsigned long size, grow;
79096
79097 size = address - vma->vm_start;
79098 @@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79099 vma->vm_end = address;
79100 }
79101 }
79102 + if (locknext)
79103 + anon_vma_unlock(vma->vm_next);
79104 anon_vma_unlock(vma);
79105 return error;
79106 }
79107 @@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
79108 unsigned long address)
79109 {
79110 int error;
79111 + bool lockprev = false;
79112 + struct vm_area_struct *prev;
79113
79114 /*
79115 * We must make sure the anon_vma is allocated
79116 @@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
79117 if (error)
79118 return error;
79119
79120 + prev = vma->vm_prev;
79121 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
79122 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
79123 +#endif
79124 + if (lockprev && anon_vma_prepare(prev))
79125 + return -ENOMEM;
79126 + if (lockprev)
79127 + anon_vma_lock(prev);
79128 +
79129 anon_vma_lock(vma);
79130
79131 /*
79132 @@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
79133 */
79134
79135 /* Somebody else might have raced and expanded it already */
79136 - if (address < vma->vm_start) {
79137 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
79138 + error = -ENOMEM;
79139 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
79140 unsigned long size, grow;
79141
79142 +#ifdef CONFIG_PAX_SEGMEXEC
79143 + struct vm_area_struct *vma_m;
79144 +
79145 + vma_m = pax_find_mirror_vma(vma);
79146 +#endif
79147 +
79148 size = vma->vm_end - address;
79149 grow = (vma->vm_start - address) >> PAGE_SHIFT;
79150
79151 @@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
79152 if (!error) {
79153 vma->vm_start = address;
79154 vma->vm_pgoff -= grow;
79155 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
79156 +
79157 +#ifdef CONFIG_PAX_SEGMEXEC
79158 + if (vma_m) {
79159 + vma_m->vm_start -= grow << PAGE_SHIFT;
79160 + vma_m->vm_pgoff -= grow;
79161 + }
79162 +#endif
79163 +
79164 +
79165 }
79166 }
79167 }
79168 anon_vma_unlock(vma);
79169 + if (lockprev)
79170 + anon_vma_unlock(prev);
79171 return error;
79172 }
79173
79174 @@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
79175 do {
79176 long nrpages = vma_pages(vma);
79177
79178 +#ifdef CONFIG_PAX_SEGMEXEC
79179 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
79180 + vma = remove_vma(vma);
79181 + continue;
79182 + }
79183 +#endif
79184 +
79185 mm->total_vm -= nrpages;
79186 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
79187 vma = remove_vma(vma);
79188 @@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
79189 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
79190 vma->vm_prev = NULL;
79191 do {
79192 +
79193 +#ifdef CONFIG_PAX_SEGMEXEC
79194 + if (vma->vm_mirror) {
79195 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
79196 + vma->vm_mirror->vm_mirror = NULL;
79197 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
79198 + vma->vm_mirror = NULL;
79199 + }
79200 +#endif
79201 +
79202 rb_erase(&vma->vm_rb, &mm->mm_rb);
79203 mm->map_count--;
79204 tail_vma = vma;
79205 @@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79206 struct mempolicy *pol;
79207 struct vm_area_struct *new;
79208
79209 +#ifdef CONFIG_PAX_SEGMEXEC
79210 + struct vm_area_struct *vma_m, *new_m = NULL;
79211 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
79212 +#endif
79213 +
79214 if (is_vm_hugetlb_page(vma) && (addr &
79215 ~(huge_page_mask(hstate_vma(vma)))))
79216 return -EINVAL;
79217
79218 +#ifdef CONFIG_PAX_SEGMEXEC
79219 + vma_m = pax_find_mirror_vma(vma);
79220 +
79221 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
79222 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
79223 + if (mm->map_count >= sysctl_max_map_count-1)
79224 + return -ENOMEM;
79225 + } else
79226 +#endif
79227 +
79228 if (mm->map_count >= sysctl_max_map_count)
79229 return -ENOMEM;
79230
79231 @@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79232 if (!new)
79233 return -ENOMEM;
79234
79235 +#ifdef CONFIG_PAX_SEGMEXEC
79236 + if (vma_m) {
79237 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
79238 + if (!new_m) {
79239 + kmem_cache_free(vm_area_cachep, new);
79240 + return -ENOMEM;
79241 + }
79242 + }
79243 +#endif
79244 +
79245 /* most fields are the same, copy all, and then fixup */
79246 *new = *vma;
79247
79248 @@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79249 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
79250 }
79251
79252 +#ifdef CONFIG_PAX_SEGMEXEC
79253 + if (vma_m) {
79254 + *new_m = *vma_m;
79255 + new_m->vm_mirror = new;
79256 + new->vm_mirror = new_m;
79257 +
79258 + if (new_below)
79259 + new_m->vm_end = addr_m;
79260 + else {
79261 + new_m->vm_start = addr_m;
79262 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
79263 + }
79264 + }
79265 +#endif
79266 +
79267 pol = mpol_dup(vma_policy(vma));
79268 if (IS_ERR(pol)) {
79269 +
79270 +#ifdef CONFIG_PAX_SEGMEXEC
79271 + if (new_m)
79272 + kmem_cache_free(vm_area_cachep, new_m);
79273 +#endif
79274 +
79275 kmem_cache_free(vm_area_cachep, new);
79276 return PTR_ERR(pol);
79277 }
79278 @@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79279 else
79280 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
79281
79282 +#ifdef CONFIG_PAX_SEGMEXEC
79283 + if (vma_m) {
79284 + mpol_get(pol);
79285 + vma_set_policy(new_m, pol);
79286 +
79287 + if (new_m->vm_file) {
79288 + get_file(new_m->vm_file);
79289 + if (vma_m->vm_flags & VM_EXECUTABLE)
79290 + added_exe_file_vma(mm);
79291 + }
79292 +
79293 + if (new_m->vm_ops && new_m->vm_ops->open)
79294 + new_m->vm_ops->open(new_m);
79295 +
79296 + if (new_below)
79297 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79298 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79299 + else
79300 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79301 + }
79302 +#endif
79303 +
79304 return 0;
79305 }
79306
79307 @@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79308 * work. This now handles partial unmappings.
79309 * Jeremy Fitzhardinge <jeremy@goop.org>
79310 */
79311 +#ifdef CONFIG_PAX_SEGMEXEC
79312 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79313 {
79314 + int ret = __do_munmap(mm, start, len);
79315 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79316 + return ret;
79317 +
79318 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79319 +}
79320 +
79321 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79322 +#else
79323 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79324 +#endif
79325 +{
79326 unsigned long end;
79327 struct vm_area_struct *vma, *prev, *last;
79328
79329 + /*
79330 + * mm->mmap_sem is required to protect against another thread
79331 + * changing the mappings in case we sleep.
79332 + */
79333 + verify_mm_writelocked(mm);
79334 +
79335 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79336 return -EINVAL;
79337
79338 @@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79339 /* Fix up all other VM information */
79340 remove_vma_list(mm, vma);
79341
79342 + track_exec_limit(mm, start, end, 0UL);
79343 +
79344 return 0;
79345 }
79346
79347 @@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79348
79349 profile_munmap(addr);
79350
79351 +#ifdef CONFIG_PAX_SEGMEXEC
79352 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79353 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79354 + return -EINVAL;
79355 +#endif
79356 +
79357 down_write(&mm->mmap_sem);
79358 ret = do_munmap(mm, addr, len);
79359 up_write(&mm->mmap_sem);
79360 return ret;
79361 }
79362
79363 -static inline void verify_mm_writelocked(struct mm_struct *mm)
79364 -{
79365 -#ifdef CONFIG_DEBUG_VM
79366 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79367 - WARN_ON(1);
79368 - up_read(&mm->mmap_sem);
79369 - }
79370 -#endif
79371 -}
79372 -
79373 /*
79374 * this is really a simplified "do_mmap". it only handles
79375 * anonymous maps. eventually we may be able to do some
79376 @@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79377 struct rb_node ** rb_link, * rb_parent;
79378 pgoff_t pgoff = addr >> PAGE_SHIFT;
79379 int error;
79380 + unsigned long charged;
79381
79382 len = PAGE_ALIGN(len);
79383 if (!len)
79384 @@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79385
79386 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79387
79388 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79389 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79390 + flags &= ~VM_EXEC;
79391 +
79392 +#ifdef CONFIG_PAX_MPROTECT
79393 + if (mm->pax_flags & MF_PAX_MPROTECT)
79394 + flags &= ~VM_MAYEXEC;
79395 +#endif
79396 +
79397 + }
79398 +#endif
79399 +
79400 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79401 if (error & ~PAGE_MASK)
79402 return error;
79403
79404 + charged = len >> PAGE_SHIFT;
79405 +
79406 /*
79407 * mlock MCL_FUTURE?
79408 */
79409 if (mm->def_flags & VM_LOCKED) {
79410 unsigned long locked, lock_limit;
79411 - locked = len >> PAGE_SHIFT;
79412 + locked = charged;
79413 locked += mm->locked_vm;
79414 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79415 lock_limit >>= PAGE_SHIFT;
79416 @@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79417 /*
79418 * Clear old maps. this also does some error checking for us
79419 */
79420 - munmap_back:
79421 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79422 if (vma && vma->vm_start < addr + len) {
79423 if (do_munmap(mm, addr, len))
79424 return -ENOMEM;
79425 - goto munmap_back;
79426 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79427 + BUG_ON(vma && vma->vm_start < addr + len);
79428 }
79429
79430 /* Check against address space limits *after* clearing old maps... */
79431 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
79432 + if (!may_expand_vm(mm, charged))
79433 return -ENOMEM;
79434
79435 if (mm->map_count > sysctl_max_map_count)
79436 return -ENOMEM;
79437
79438 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
79439 + if (security_vm_enough_memory(charged))
79440 return -ENOMEM;
79441
79442 /* Can we just expand an old private anonymous mapping? */
79443 @@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79444 */
79445 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79446 if (!vma) {
79447 - vm_unacct_memory(len >> PAGE_SHIFT);
79448 + vm_unacct_memory(charged);
79449 return -ENOMEM;
79450 }
79451
79452 @@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79453 vma->vm_page_prot = vm_get_page_prot(flags);
79454 vma_link(mm, vma, prev, rb_link, rb_parent);
79455 out:
79456 - mm->total_vm += len >> PAGE_SHIFT;
79457 + mm->total_vm += charged;
79458 if (flags & VM_LOCKED) {
79459 if (!mlock_vma_pages_range(vma, addr, addr + len))
79460 - mm->locked_vm += (len >> PAGE_SHIFT);
79461 + mm->locked_vm += charged;
79462 }
79463 + track_exec_limit(mm, addr, addr + len, flags);
79464 return addr;
79465 }
79466
79467 @@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
79468 * Walk the list again, actually closing and freeing it,
79469 * with preemption enabled, without holding any MM locks.
79470 */
79471 - while (vma)
79472 + while (vma) {
79473 + vma->vm_mirror = NULL;
79474 vma = remove_vma(vma);
79475 + }
79476
79477 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
79478 }
79479 @@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79480 struct vm_area_struct * __vma, * prev;
79481 struct rb_node ** rb_link, * rb_parent;
79482
79483 +#ifdef CONFIG_PAX_SEGMEXEC
79484 + struct vm_area_struct *vma_m = NULL;
79485 +#endif
79486 +
79487 /*
79488 * The vm_pgoff of a purely anonymous vma should be irrelevant
79489 * until its first write fault, when page's anon_vma and index
79490 @@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79491 if ((vma->vm_flags & VM_ACCOUNT) &&
79492 security_vm_enough_memory_mm(mm, vma_pages(vma)))
79493 return -ENOMEM;
79494 +
79495 +#ifdef CONFIG_PAX_SEGMEXEC
79496 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
79497 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79498 + if (!vma_m)
79499 + return -ENOMEM;
79500 + }
79501 +#endif
79502 +
79503 vma_link(mm, vma, prev, rb_link, rb_parent);
79504 +
79505 +#ifdef CONFIG_PAX_SEGMEXEC
79506 + if (vma_m)
79507 + pax_mirror_vma(vma_m, vma);
79508 +#endif
79509 +
79510 return 0;
79511 }
79512
79513 @@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79514 struct rb_node **rb_link, *rb_parent;
79515 struct mempolicy *pol;
79516
79517 + BUG_ON(vma->vm_mirror);
79518 +
79519 /*
79520 * If anonymous vma has not yet been faulted, update new pgoff
79521 * to match new location, to increase its chance of merging.
79522 @@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79523 return new_vma;
79524 }
79525
79526 +#ifdef CONFIG_PAX_SEGMEXEC
79527 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
79528 +{
79529 + struct vm_area_struct *prev_m;
79530 + struct rb_node **rb_link_m, *rb_parent_m;
79531 + struct mempolicy *pol_m;
79532 +
79533 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
79534 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
79535 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
79536 + *vma_m = *vma;
79537 + pol_m = vma_policy(vma_m);
79538 + mpol_get(pol_m);
79539 + vma_set_policy(vma_m, pol_m);
79540 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
79541 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
79542 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
79543 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
79544 + if (vma_m->vm_file)
79545 + get_file(vma_m->vm_file);
79546 + if (vma_m->vm_ops && vma_m->vm_ops->open)
79547 + vma_m->vm_ops->open(vma_m);
79548 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
79549 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
79550 + vma_m->vm_mirror = vma;
79551 + vma->vm_mirror = vma_m;
79552 +}
79553 +#endif
79554 +
79555 /*
79556 * Return true if the calling process may expand its vm space by the passed
79557 * number of pages
79558 @@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
79559 unsigned long lim;
79560
79561 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
79562 -
79563 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
79564 if (cur + npages > lim)
79565 return 0;
79566 return 1;
79567 @@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
79568 vma->vm_start = addr;
79569 vma->vm_end = addr + len;
79570
79571 +#ifdef CONFIG_PAX_MPROTECT
79572 + if (mm->pax_flags & MF_PAX_MPROTECT) {
79573 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
79574 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
79575 + return -EPERM;
79576 + if (!(vm_flags & VM_EXEC))
79577 + vm_flags &= ~VM_MAYEXEC;
79578 +#else
79579 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79580 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79581 +#endif
79582 + else
79583 + vm_flags &= ~VM_MAYWRITE;
79584 + }
79585 +#endif
79586 +
79587 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
79588 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79589
79590 diff --git a/mm/mprotect.c b/mm/mprotect.c
79591 index 1737c7e..c7faeb4 100644
79592 --- a/mm/mprotect.c
79593 +++ b/mm/mprotect.c
79594 @@ -24,10 +24,16 @@
79595 #include <linux/mmu_notifier.h>
79596 #include <linux/migrate.h>
79597 #include <linux/perf_event.h>
79598 +
79599 +#ifdef CONFIG_PAX_MPROTECT
79600 +#include <linux/elf.h>
79601 +#endif
79602 +
79603 #include <asm/uaccess.h>
79604 #include <asm/pgtable.h>
79605 #include <asm/cacheflush.h>
79606 #include <asm/tlbflush.h>
79607 +#include <asm/mmu_context.h>
79608
79609 #ifndef pgprot_modify
79610 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
79611 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
79612 flush_tlb_range(vma, start, end);
79613 }
79614
79615 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79616 +/* called while holding the mmap semaphor for writing except stack expansion */
79617 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
79618 +{
79619 + unsigned long oldlimit, newlimit = 0UL;
79620 +
79621 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
79622 + return;
79623 +
79624 + spin_lock(&mm->page_table_lock);
79625 + oldlimit = mm->context.user_cs_limit;
79626 + if ((prot & VM_EXEC) && oldlimit < end)
79627 + /* USER_CS limit moved up */
79628 + newlimit = end;
79629 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
79630 + /* USER_CS limit moved down */
79631 + newlimit = start;
79632 +
79633 + if (newlimit) {
79634 + mm->context.user_cs_limit = newlimit;
79635 +
79636 +#ifdef CONFIG_SMP
79637 + wmb();
79638 + cpus_clear(mm->context.cpu_user_cs_mask);
79639 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
79640 +#endif
79641 +
79642 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79643 + }
79644 + spin_unlock(&mm->page_table_lock);
79645 + if (newlimit == end) {
79646 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
79647 +
79648 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
79649 + if (is_vm_hugetlb_page(vma))
79650 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79651 + else
79652 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79653 + }
79654 +}
79655 +#endif
79656 +
79657 int
79658 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79659 unsigned long start, unsigned long end, unsigned long newflags)
79660 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79661 int error;
79662 int dirty_accountable = 0;
79663
79664 +#ifdef CONFIG_PAX_SEGMEXEC
79665 + struct vm_area_struct *vma_m = NULL;
79666 + unsigned long start_m, end_m;
79667 +
79668 + start_m = start + SEGMEXEC_TASK_SIZE;
79669 + end_m = end + SEGMEXEC_TASK_SIZE;
79670 +#endif
79671 +
79672 if (newflags == oldflags) {
79673 *pprev = vma;
79674 return 0;
79675 }
79676
79677 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79678 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79679 +
79680 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79681 + return -ENOMEM;
79682 +
79683 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79684 + return -ENOMEM;
79685 + }
79686 +
79687 /*
79688 * If we make a private mapping writable we increase our commit;
79689 * but (without finer accounting) cannot reduce our commit if we
79690 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79691 }
79692 }
79693
79694 +#ifdef CONFIG_PAX_SEGMEXEC
79695 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79696 + if (start != vma->vm_start) {
79697 + error = split_vma(mm, vma, start, 1);
79698 + if (error)
79699 + goto fail;
79700 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79701 + *pprev = (*pprev)->vm_next;
79702 + }
79703 +
79704 + if (end != vma->vm_end) {
79705 + error = split_vma(mm, vma, end, 0);
79706 + if (error)
79707 + goto fail;
79708 + }
79709 +
79710 + if (pax_find_mirror_vma(vma)) {
79711 + error = __do_munmap(mm, start_m, end_m - start_m);
79712 + if (error)
79713 + goto fail;
79714 + } else {
79715 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79716 + if (!vma_m) {
79717 + error = -ENOMEM;
79718 + goto fail;
79719 + }
79720 + vma->vm_flags = newflags;
79721 + pax_mirror_vma(vma_m, vma);
79722 + }
79723 + }
79724 +#endif
79725 +
79726 /*
79727 * First try to merge with previous and/or next vma.
79728 */
79729 @@ -195,9 +293,21 @@ success:
79730 * vm_flags and vm_page_prot are protected by the mmap_sem
79731 * held in write mode.
79732 */
79733 +
79734 +#ifdef CONFIG_PAX_SEGMEXEC
79735 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79736 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79737 +#endif
79738 +
79739 vma->vm_flags = newflags;
79740 +
79741 +#ifdef CONFIG_PAX_MPROTECT
79742 + if (mm->binfmt && mm->binfmt->handle_mprotect)
79743 + mm->binfmt->handle_mprotect(vma, newflags);
79744 +#endif
79745 +
79746 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79747 - vm_get_page_prot(newflags));
79748 + vm_get_page_prot(vma->vm_flags));
79749
79750 if (vma_wants_writenotify(vma)) {
79751 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79752 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79753 end = start + len;
79754 if (end <= start)
79755 return -ENOMEM;
79756 +
79757 +#ifdef CONFIG_PAX_SEGMEXEC
79758 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79759 + if (end > SEGMEXEC_TASK_SIZE)
79760 + return -EINVAL;
79761 + } else
79762 +#endif
79763 +
79764 + if (end > TASK_SIZE)
79765 + return -EINVAL;
79766 +
79767 if (!arch_validate_prot(prot))
79768 return -EINVAL;
79769
79770 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79771 /*
79772 * Does the application expect PROT_READ to imply PROT_EXEC:
79773 */
79774 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79775 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79776 prot |= PROT_EXEC;
79777
79778 vm_flags = calc_vm_prot_bits(prot);
79779 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79780 if (start > vma->vm_start)
79781 prev = vma;
79782
79783 +#ifdef CONFIG_PAX_MPROTECT
79784 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79785 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
79786 +#endif
79787 +
79788 for (nstart = start ; ; ) {
79789 unsigned long newflags;
79790
79791 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79792
79793 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79794 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79795 + if (prot & (PROT_WRITE | PROT_EXEC))
79796 + gr_log_rwxmprotect(vma->vm_file);
79797 +
79798 + error = -EACCES;
79799 + goto out;
79800 + }
79801 +
79802 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79803 error = -EACCES;
79804 goto out;
79805 }
79806 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79807 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79808 if (error)
79809 goto out;
79810 +
79811 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
79812 +
79813 nstart = tmp;
79814
79815 if (nstart < prev->vm_end)
79816 diff --git a/mm/mremap.c b/mm/mremap.c
79817 index 3e98d79..1706cec 100644
79818 --- a/mm/mremap.c
79819 +++ b/mm/mremap.c
79820 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79821 continue;
79822 pte = ptep_clear_flush(vma, old_addr, old_pte);
79823 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79824 +
79825 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79826 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79827 + pte = pte_exprotect(pte);
79828 +#endif
79829 +
79830 set_pte_at(mm, new_addr, new_pte, pte);
79831 }
79832
79833 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79834 if (is_vm_hugetlb_page(vma))
79835 goto Einval;
79836
79837 +#ifdef CONFIG_PAX_SEGMEXEC
79838 + if (pax_find_mirror_vma(vma))
79839 + goto Einval;
79840 +#endif
79841 +
79842 /* We can't remap across vm area boundaries */
79843 if (old_len > vma->vm_end - addr)
79844 goto Efault;
79845 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79846 unsigned long ret = -EINVAL;
79847 unsigned long charged = 0;
79848 unsigned long map_flags;
79849 + unsigned long pax_task_size = TASK_SIZE;
79850
79851 if (new_addr & ~PAGE_MASK)
79852 goto out;
79853
79854 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79855 +#ifdef CONFIG_PAX_SEGMEXEC
79856 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79857 + pax_task_size = SEGMEXEC_TASK_SIZE;
79858 +#endif
79859 +
79860 + pax_task_size -= PAGE_SIZE;
79861 +
79862 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79863 goto out;
79864
79865 /* Check if the location we're moving into overlaps the
79866 * old location at all, and fail if it does.
79867 */
79868 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
79869 - goto out;
79870 -
79871 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
79872 + if (addr + old_len > new_addr && new_addr + new_len > addr)
79873 goto out;
79874
79875 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79876 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79877 struct vm_area_struct *vma;
79878 unsigned long ret = -EINVAL;
79879 unsigned long charged = 0;
79880 + unsigned long pax_task_size = TASK_SIZE;
79881
79882 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79883 goto out;
79884 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79885 if (!new_len)
79886 goto out;
79887
79888 +#ifdef CONFIG_PAX_SEGMEXEC
79889 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79890 + pax_task_size = SEGMEXEC_TASK_SIZE;
79891 +#endif
79892 +
79893 + pax_task_size -= PAGE_SIZE;
79894 +
79895 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79896 + old_len > pax_task_size || addr > pax_task_size-old_len)
79897 + goto out;
79898 +
79899 if (flags & MREMAP_FIXED) {
79900 if (flags & MREMAP_MAYMOVE)
79901 ret = mremap_to(addr, old_len, new_addr, new_len);
79902 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79903 addr + new_len);
79904 }
79905 ret = addr;
79906 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79907 goto out;
79908 }
79909 }
79910 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79911 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79912 if (ret)
79913 goto out;
79914 +
79915 + map_flags = vma->vm_flags;
79916 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79917 + if (!(ret & ~PAGE_MASK)) {
79918 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79919 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79920 + }
79921 }
79922 out:
79923 if (ret & ~PAGE_MASK)
79924 diff --git a/mm/nommu.c b/mm/nommu.c
79925 index 406e8d4..53970d3 100644
79926 --- a/mm/nommu.c
79927 +++ b/mm/nommu.c
79928 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79929 int sysctl_overcommit_ratio = 50; /* default is 50% */
79930 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79931 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79932 -int heap_stack_gap = 0;
79933
79934 atomic_long_t mmap_pages_allocated;
79935
79936 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79937 EXPORT_SYMBOL(find_vma);
79938
79939 /*
79940 - * find a VMA
79941 - * - we don't extend stack VMAs under NOMMU conditions
79942 - */
79943 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79944 -{
79945 - return find_vma(mm, addr);
79946 -}
79947 -
79948 -/*
79949 * expand a stack to a given address
79950 * - not supported under NOMMU conditions
79951 */
79952 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79953 index 3ecab7e..594a471 100644
79954 --- a/mm/page_alloc.c
79955 +++ b/mm/page_alloc.c
79956 @@ -289,7 +289,7 @@ out:
79957 * This usage means that zero-order pages may not be compound.
79958 */
79959
79960 -static void free_compound_page(struct page *page)
79961 +void free_compound_page(struct page *page)
79962 {
79963 __free_pages_ok(page, compound_order(page));
79964 }
79965 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79966 int bad = 0;
79967 int wasMlocked = __TestClearPageMlocked(page);
79968
79969 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79970 + unsigned long index = 1UL << order;
79971 +#endif
79972 +
79973 kmemcheck_free_shadow(page, order);
79974
79975 for (i = 0 ; i < (1 << order) ; ++i)
79976 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79977 debug_check_no_obj_freed(page_address(page),
79978 PAGE_SIZE << order);
79979 }
79980 +
79981 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79982 + for (; index; --index)
79983 + sanitize_highpage(page + index - 1);
79984 +#endif
79985 +
79986 arch_free_page(page, order);
79987 kernel_map_pages(page, 1 << order, 0);
79988
79989 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79990 arch_alloc_page(page, order);
79991 kernel_map_pages(page, 1 << order, 1);
79992
79993 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
79994 if (gfp_flags & __GFP_ZERO)
79995 prep_zero_page(page, order, gfp_flags);
79996 +#endif
79997
79998 if (order && (gfp_flags & __GFP_COMP))
79999 prep_compound_page(page, order);
80000 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
80001 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
80002 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
80003 }
80004 +
80005 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
80006 + sanitize_highpage(page);
80007 +#endif
80008 +
80009 arch_free_page(page, 0);
80010 kernel_map_pages(page, 1, 0);
80011
80012 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
80013 int cpu;
80014 struct zone *zone;
80015
80016 + pax_track_stack();
80017 +
80018 for_each_populated_zone(zone) {
80019 show_node(zone);
80020 printk("%s per-cpu:\n", zone->name);
80021 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
80022 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
80023 }
80024 #else
80025 -static void inline setup_usemap(struct pglist_data *pgdat,
80026 +static inline void setup_usemap(struct pglist_data *pgdat,
80027 struct zone *zone, unsigned long zonesize) {}
80028 #endif /* CONFIG_SPARSEMEM */
80029
80030 diff --git a/mm/percpu.c b/mm/percpu.c
80031 index c90614a..5f7b7b8 100644
80032 --- a/mm/percpu.c
80033 +++ b/mm/percpu.c
80034 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
80035 static unsigned int pcpu_high_unit_cpu __read_mostly;
80036
80037 /* the address of the first chunk which starts with the kernel static area */
80038 -void *pcpu_base_addr __read_mostly;
80039 +void *pcpu_base_addr __read_only;
80040 EXPORT_SYMBOL_GPL(pcpu_base_addr);
80041
80042 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
80043 diff --git a/mm/rmap.c b/mm/rmap.c
80044 index dd43373..d848cd7 100644
80045 --- a/mm/rmap.c
80046 +++ b/mm/rmap.c
80047 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
80048 /* page_table_lock to protect against threads */
80049 spin_lock(&mm->page_table_lock);
80050 if (likely(!vma->anon_vma)) {
80051 +
80052 +#ifdef CONFIG_PAX_SEGMEXEC
80053 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
80054 +
80055 + if (vma_m) {
80056 + BUG_ON(vma_m->anon_vma);
80057 + vma_m->anon_vma = anon_vma;
80058 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
80059 + }
80060 +#endif
80061 +
80062 vma->anon_vma = anon_vma;
80063 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
80064 allocated = NULL;
80065 diff --git a/mm/shmem.c b/mm/shmem.c
80066 index 3e0005b..1d659a8 100644
80067 --- a/mm/shmem.c
80068 +++ b/mm/shmem.c
80069 @@ -31,7 +31,7 @@
80070 #include <linux/swap.h>
80071 #include <linux/ima.h>
80072
80073 -static struct vfsmount *shm_mnt;
80074 +struct vfsmount *shm_mnt;
80075
80076 #ifdef CONFIG_SHMEM
80077 /*
80078 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
80079 goto unlock;
80080 }
80081 entry = shmem_swp_entry(info, index, NULL);
80082 + if (!entry)
80083 + goto unlock;
80084 if (entry->val) {
80085 /*
80086 * The more uptodate page coming down from a stacked
80087 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
80088 struct vm_area_struct pvma;
80089 struct page *page;
80090
80091 + pax_track_stack();
80092 +
80093 spol = mpol_cond_copy(&mpol,
80094 mpol_shared_policy_lookup(&info->policy, idx));
80095
80096 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
80097
80098 info = SHMEM_I(inode);
80099 inode->i_size = len-1;
80100 - if (len <= (char *)inode - (char *)info) {
80101 + if (len <= (char *)inode - (char *)info && len <= 64) {
80102 /* do it inline */
80103 memcpy(info, symname, len);
80104 inode->i_op = &shmem_symlink_inline_operations;
80105 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
80106 int err = -ENOMEM;
80107
80108 /* Round up to L1_CACHE_BYTES to resist false sharing */
80109 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
80110 - L1_CACHE_BYTES), GFP_KERNEL);
80111 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
80112 if (!sbinfo)
80113 return -ENOMEM;
80114
80115 diff --git a/mm/slab.c b/mm/slab.c
80116 index c8d466a..909e01e 100644
80117 --- a/mm/slab.c
80118 +++ b/mm/slab.c
80119 @@ -174,7 +174,7 @@
80120
80121 /* Legal flag mask for kmem_cache_create(). */
80122 #if DEBUG
80123 -# define CREATE_MASK (SLAB_RED_ZONE | \
80124 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
80125 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
80126 SLAB_CACHE_DMA | \
80127 SLAB_STORE_USER | \
80128 @@ -182,7 +182,7 @@
80129 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80130 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
80131 #else
80132 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
80133 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
80134 SLAB_CACHE_DMA | \
80135 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
80136 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80137 @@ -308,7 +308,7 @@ struct kmem_list3 {
80138 * Need this for bootstrapping a per node allocator.
80139 */
80140 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
80141 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
80142 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
80143 #define CACHE_CACHE 0
80144 #define SIZE_AC MAX_NUMNODES
80145 #define SIZE_L3 (2 * MAX_NUMNODES)
80146 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
80147 if ((x)->max_freeable < i) \
80148 (x)->max_freeable = i; \
80149 } while (0)
80150 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
80151 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
80152 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
80153 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
80154 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
80155 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
80156 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
80157 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
80158 #else
80159 #define STATS_INC_ACTIVE(x) do { } while (0)
80160 #define STATS_DEC_ACTIVE(x) do { } while (0)
80161 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
80162 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
80163 */
80164 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
80165 - const struct slab *slab, void *obj)
80166 + const struct slab *slab, const void *obj)
80167 {
80168 u32 offset = (obj - slab->s_mem);
80169 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
80170 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
80171 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
80172 sizes[INDEX_AC].cs_size,
80173 ARCH_KMALLOC_MINALIGN,
80174 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80175 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80176 NULL);
80177
80178 if (INDEX_AC != INDEX_L3) {
80179 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
80180 kmem_cache_create(names[INDEX_L3].name,
80181 sizes[INDEX_L3].cs_size,
80182 ARCH_KMALLOC_MINALIGN,
80183 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80184 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80185 NULL);
80186 }
80187
80188 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
80189 sizes->cs_cachep = kmem_cache_create(names->name,
80190 sizes->cs_size,
80191 ARCH_KMALLOC_MINALIGN,
80192 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80193 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80194 NULL);
80195 }
80196 #ifdef CONFIG_ZONE_DMA
80197 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
80198 }
80199 /* cpu stats */
80200 {
80201 - unsigned long allochit = atomic_read(&cachep->allochit);
80202 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
80203 - unsigned long freehit = atomic_read(&cachep->freehit);
80204 - unsigned long freemiss = atomic_read(&cachep->freemiss);
80205 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
80206 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
80207 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
80208 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
80209
80210 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
80211 allochit, allocmiss, freehit, freemiss);
80212 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
80213
80214 static int __init slab_proc_init(void)
80215 {
80216 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
80217 + mode_t gr_mode = S_IRUGO;
80218 +
80219 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80220 + gr_mode = S_IRUSR;
80221 +#endif
80222 +
80223 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
80224 #ifdef CONFIG_DEBUG_SLAB_LEAK
80225 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
80226 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
80227 #endif
80228 return 0;
80229 }
80230 module_init(slab_proc_init);
80231 #endif
80232
80233 +void check_object_size(const void *ptr, unsigned long n, bool to)
80234 +{
80235 +
80236 +#ifdef CONFIG_PAX_USERCOPY
80237 + struct page *page;
80238 + struct kmem_cache *cachep = NULL;
80239 + struct slab *slabp;
80240 + unsigned int objnr;
80241 + unsigned long offset;
80242 + const char *type;
80243 +
80244 + if (!n)
80245 + return;
80246 +
80247 + type = "<null>";
80248 + if (ZERO_OR_NULL_PTR(ptr))
80249 + goto report;
80250 +
80251 + if (!virt_addr_valid(ptr))
80252 + return;
80253 +
80254 + page = virt_to_head_page(ptr);
80255 +
80256 + type = "<process stack>";
80257 + if (!PageSlab(page)) {
80258 + if (object_is_on_stack(ptr, n) == -1)
80259 + goto report;
80260 + return;
80261 + }
80262 +
80263 + cachep = page_get_cache(page);
80264 + type = cachep->name;
80265 + if (!(cachep->flags & SLAB_USERCOPY))
80266 + goto report;
80267 +
80268 + slabp = page_get_slab(page);
80269 + objnr = obj_to_index(cachep, slabp, ptr);
80270 + BUG_ON(objnr >= cachep->num);
80271 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
80272 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
80273 + return;
80274 +
80275 +report:
80276 + pax_report_usercopy(ptr, n, to, type);
80277 +#endif
80278 +
80279 +}
80280 +EXPORT_SYMBOL(check_object_size);
80281 +
80282 /**
80283 * ksize - get the actual amount of memory allocated for a given object
80284 * @objp: Pointer to the object
80285 diff --git a/mm/slob.c b/mm/slob.c
80286 index 837ebd6..0bd23bc 100644
80287 --- a/mm/slob.c
80288 +++ b/mm/slob.c
80289 @@ -29,7 +29,7 @@
80290 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80291 * alloc_pages() directly, allocating compound pages so the page order
80292 * does not have to be separately tracked, and also stores the exact
80293 - * allocation size in page->private so that it can be used to accurately
80294 + * allocation size in slob_page->size so that it can be used to accurately
80295 * provide ksize(). These objects are detected in kfree() because slob_page()
80296 * is false for them.
80297 *
80298 @@ -58,6 +58,7 @@
80299 */
80300
80301 #include <linux/kernel.h>
80302 +#include <linux/sched.h>
80303 #include <linux/slab.h>
80304 #include <linux/mm.h>
80305 #include <linux/swap.h> /* struct reclaim_state */
80306 @@ -100,7 +101,8 @@ struct slob_page {
80307 unsigned long flags; /* mandatory */
80308 atomic_t _count; /* mandatory */
80309 slobidx_t units; /* free units left in page */
80310 - unsigned long pad[2];
80311 + unsigned long pad[1];
80312 + unsigned long size; /* size when >=PAGE_SIZE */
80313 slob_t *free; /* first free slob_t in page */
80314 struct list_head list; /* linked list of free pages */
80315 };
80316 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80317 */
80318 static inline int is_slob_page(struct slob_page *sp)
80319 {
80320 - return PageSlab((struct page *)sp);
80321 + return PageSlab((struct page *)sp) && !sp->size;
80322 }
80323
80324 static inline void set_slob_page(struct slob_page *sp)
80325 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80326
80327 static inline struct slob_page *slob_page(const void *addr)
80328 {
80329 - return (struct slob_page *)virt_to_page(addr);
80330 + return (struct slob_page *)virt_to_head_page(addr);
80331 }
80332
80333 /*
80334 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80335 /*
80336 * Return the size of a slob block.
80337 */
80338 -static slobidx_t slob_units(slob_t *s)
80339 +static slobidx_t slob_units(const slob_t *s)
80340 {
80341 if (s->units > 0)
80342 return s->units;
80343 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80344 /*
80345 * Return the next free slob block pointer after this one.
80346 */
80347 -static slob_t *slob_next(slob_t *s)
80348 +static slob_t *slob_next(const slob_t *s)
80349 {
80350 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80351 slobidx_t next;
80352 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80353 /*
80354 * Returns true if s is the last free block in its page.
80355 */
80356 -static int slob_last(slob_t *s)
80357 +static int slob_last(const slob_t *s)
80358 {
80359 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80360 }
80361 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80362 if (!page)
80363 return NULL;
80364
80365 + set_slob_page(page);
80366 return page_address(page);
80367 }
80368
80369 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80370 if (!b)
80371 return NULL;
80372 sp = slob_page(b);
80373 - set_slob_page(sp);
80374
80375 spin_lock_irqsave(&slob_lock, flags);
80376 sp->units = SLOB_UNITS(PAGE_SIZE);
80377 sp->free = b;
80378 + sp->size = 0;
80379 INIT_LIST_HEAD(&sp->list);
80380 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80381 set_slob_page_free(sp, slob_list);
80382 @@ -475,10 +478,9 @@ out:
80383 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80384 #endif
80385
80386 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80387 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80388 {
80389 - unsigned int *m;
80390 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80391 + slob_t *m;
80392 void *ret;
80393
80394 lockdep_trace_alloc(gfp);
80395 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80396
80397 if (!m)
80398 return NULL;
80399 - *m = size;
80400 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80401 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80402 + m[0].units = size;
80403 + m[1].units = align;
80404 ret = (void *)m + align;
80405
80406 trace_kmalloc_node(_RET_IP_, ret,
80407 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80408
80409 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80410 if (ret) {
80411 - struct page *page;
80412 - page = virt_to_page(ret);
80413 - page->private = size;
80414 + struct slob_page *sp;
80415 + sp = slob_page(ret);
80416 + sp->size = size;
80417 }
80418
80419 trace_kmalloc_node(_RET_IP_, ret,
80420 size, PAGE_SIZE << order, gfp, node);
80421 }
80422
80423 - kmemleak_alloc(ret, size, 1, gfp);
80424 + return ret;
80425 +}
80426 +
80427 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80428 +{
80429 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80430 + void *ret = __kmalloc_node_align(size, gfp, node, align);
80431 +
80432 + if (!ZERO_OR_NULL_PTR(ret))
80433 + kmemleak_alloc(ret, size, 1, gfp);
80434 return ret;
80435 }
80436 EXPORT_SYMBOL(__kmalloc_node);
80437 @@ -528,13 +542,92 @@ void kfree(const void *block)
80438 sp = slob_page(block);
80439 if (is_slob_page(sp)) {
80440 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80441 - unsigned int *m = (unsigned int *)(block - align);
80442 - slob_free(m, *m + align);
80443 - } else
80444 + slob_t *m = (slob_t *)(block - align);
80445 + slob_free(m, m[0].units + align);
80446 + } else {
80447 + clear_slob_page(sp);
80448 + free_slob_page(sp);
80449 + sp->size = 0;
80450 put_page(&sp->page);
80451 + }
80452 }
80453 EXPORT_SYMBOL(kfree);
80454
80455 +void check_object_size(const void *ptr, unsigned long n, bool to)
80456 +{
80457 +
80458 +#ifdef CONFIG_PAX_USERCOPY
80459 + struct slob_page *sp;
80460 + const slob_t *free;
80461 + const void *base;
80462 + unsigned long flags;
80463 + const char *type;
80464 +
80465 + if (!n)
80466 + return;
80467 +
80468 + type = "<null>";
80469 + if (ZERO_OR_NULL_PTR(ptr))
80470 + goto report;
80471 +
80472 + if (!virt_addr_valid(ptr))
80473 + return;
80474 +
80475 + type = "<process stack>";
80476 + sp = slob_page(ptr);
80477 + if (!PageSlab((struct page *)sp)) {
80478 + if (object_is_on_stack(ptr, n) == -1)
80479 + goto report;
80480 + return;
80481 + }
80482 +
80483 + type = "<slob>";
80484 + if (sp->size) {
80485 + base = page_address(&sp->page);
80486 + if (base <= ptr && n <= sp->size - (ptr - base))
80487 + return;
80488 + goto report;
80489 + }
80490 +
80491 + /* some tricky double walking to find the chunk */
80492 + spin_lock_irqsave(&slob_lock, flags);
80493 + base = (void *)((unsigned long)ptr & PAGE_MASK);
80494 + free = sp->free;
80495 +
80496 + while (!slob_last(free) && (void *)free <= ptr) {
80497 + base = free + slob_units(free);
80498 + free = slob_next(free);
80499 + }
80500 +
80501 + while (base < (void *)free) {
80502 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
80503 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
80504 + int offset;
80505 +
80506 + if (ptr < base + align)
80507 + break;
80508 +
80509 + offset = ptr - base - align;
80510 + if (offset >= m) {
80511 + base += size;
80512 + continue;
80513 + }
80514 +
80515 + if (n > m - offset)
80516 + break;
80517 +
80518 + spin_unlock_irqrestore(&slob_lock, flags);
80519 + return;
80520 + }
80521 +
80522 + spin_unlock_irqrestore(&slob_lock, flags);
80523 +report:
80524 + pax_report_usercopy(ptr, n, to, type);
80525 +#endif
80526 +
80527 +}
80528 +EXPORT_SYMBOL(check_object_size);
80529 +
80530 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
80531 size_t ksize(const void *block)
80532 {
80533 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
80534 sp = slob_page(block);
80535 if (is_slob_page(sp)) {
80536 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80537 - unsigned int *m = (unsigned int *)(block - align);
80538 - return SLOB_UNITS(*m) * SLOB_UNIT;
80539 + slob_t *m = (slob_t *)(block - align);
80540 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
80541 } else
80542 - return sp->page.private;
80543 + return sp->size;
80544 }
80545 EXPORT_SYMBOL(ksize);
80546
80547 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80548 {
80549 struct kmem_cache *c;
80550
80551 +#ifdef CONFIG_PAX_USERCOPY
80552 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
80553 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
80554 +#else
80555 c = slob_alloc(sizeof(struct kmem_cache),
80556 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
80557 +#endif
80558
80559 if (c) {
80560 c->name = name;
80561 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
80562 {
80563 void *b;
80564
80565 +#ifdef CONFIG_PAX_USERCOPY
80566 + b = __kmalloc_node_align(c->size, flags, node, c->align);
80567 +#else
80568 if (c->size < PAGE_SIZE) {
80569 b = slob_alloc(c->size, flags, c->align, node);
80570 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80571 SLOB_UNITS(c->size) * SLOB_UNIT,
80572 flags, node);
80573 } else {
80574 + struct slob_page *sp;
80575 +
80576 b = slob_new_pages(flags, get_order(c->size), node);
80577 + sp = slob_page(b);
80578 + sp->size = c->size;
80579 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80580 PAGE_SIZE << get_order(c->size),
80581 flags, node);
80582 }
80583 +#endif
80584
80585 if (c->ctor)
80586 c->ctor(b);
80587 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
80588
80589 static void __kmem_cache_free(void *b, int size)
80590 {
80591 - if (size < PAGE_SIZE)
80592 + struct slob_page *sp = slob_page(b);
80593 +
80594 + if (is_slob_page(sp))
80595 slob_free(b, size);
80596 - else
80597 + else {
80598 + clear_slob_page(sp);
80599 + free_slob_page(sp);
80600 + sp->size = 0;
80601 slob_free_pages(b, get_order(size));
80602 + }
80603 }
80604
80605 static void kmem_rcu_free(struct rcu_head *head)
80606 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
80607
80608 void kmem_cache_free(struct kmem_cache *c, void *b)
80609 {
80610 + int size = c->size;
80611 +
80612 +#ifdef CONFIG_PAX_USERCOPY
80613 + if (size + c->align < PAGE_SIZE) {
80614 + size += c->align;
80615 + b -= c->align;
80616 + }
80617 +#endif
80618 +
80619 kmemleak_free_recursive(b, c->flags);
80620 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
80621 struct slob_rcu *slob_rcu;
80622 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
80623 + slob_rcu = b + (size - sizeof(struct slob_rcu));
80624 INIT_RCU_HEAD(&slob_rcu->head);
80625 - slob_rcu->size = c->size;
80626 + slob_rcu->size = size;
80627 call_rcu(&slob_rcu->head, kmem_rcu_free);
80628 } else {
80629 - __kmem_cache_free(b, c->size);
80630 + __kmem_cache_free(b, size);
80631 }
80632
80633 +#ifdef CONFIG_PAX_USERCOPY
80634 + trace_kfree(_RET_IP_, b);
80635 +#else
80636 trace_kmem_cache_free(_RET_IP_, b);
80637 +#endif
80638 +
80639 }
80640 EXPORT_SYMBOL(kmem_cache_free);
80641
80642 diff --git a/mm/slub.c b/mm/slub.c
80643 index 4996fc7..87e01d0 100644
80644 --- a/mm/slub.c
80645 +++ b/mm/slub.c
80646 @@ -201,7 +201,7 @@ struct track {
80647
80648 enum track_item { TRACK_ALLOC, TRACK_FREE };
80649
80650 -#ifdef CONFIG_SLUB_DEBUG
80651 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80652 static int sysfs_slab_add(struct kmem_cache *);
80653 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80654 static void sysfs_slab_remove(struct kmem_cache *);
80655 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80656 if (!t->addr)
80657 return;
80658
80659 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80660 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80661 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80662 }
80663
80664 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80665
80666 page = virt_to_head_page(x);
80667
80668 + BUG_ON(!PageSlab(page));
80669 +
80670 slab_free(s, page, x, _RET_IP_);
80671
80672 trace_kmem_cache_free(_RET_IP_, x);
80673 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
80674 * Merge control. If this is set then no merging of slab caches will occur.
80675 * (Could be removed. This was introduced to pacify the merge skeptics.)
80676 */
80677 -static int slub_nomerge;
80678 +static int slub_nomerge = 1;
80679
80680 /*
80681 * Calculate the order of allocation given an slab object size.
80682 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80683 * list to avoid pounding the page allocator excessively.
80684 */
80685 set_min_partial(s, ilog2(s->size));
80686 - s->refcount = 1;
80687 + atomic_set(&s->refcount, 1);
80688 #ifdef CONFIG_NUMA
80689 s->remote_node_defrag_ratio = 1000;
80690 #endif
80691 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80692 void kmem_cache_destroy(struct kmem_cache *s)
80693 {
80694 down_write(&slub_lock);
80695 - s->refcount--;
80696 - if (!s->refcount) {
80697 + if (atomic_dec_and_test(&s->refcount)) {
80698 list_del(&s->list);
80699 up_write(&slub_lock);
80700 if (kmem_cache_close(s)) {
80701 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80702 __setup("slub_nomerge", setup_slub_nomerge);
80703
80704 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80705 - const char *name, int size, gfp_t gfp_flags)
80706 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80707 {
80708 - unsigned int flags = 0;
80709 -
80710 if (gfp_flags & SLUB_DMA)
80711 - flags = SLAB_CACHE_DMA;
80712 + flags |= SLAB_CACHE_DMA;
80713
80714 /*
80715 * This function is called with IRQs disabled during early-boot on
80716 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80717 EXPORT_SYMBOL(__kmalloc_node);
80718 #endif
80719
80720 +void check_object_size(const void *ptr, unsigned long n, bool to)
80721 +{
80722 +
80723 +#ifdef CONFIG_PAX_USERCOPY
80724 + struct page *page;
80725 + struct kmem_cache *s = NULL;
80726 + unsigned long offset;
80727 + const char *type;
80728 +
80729 + if (!n)
80730 + return;
80731 +
80732 + type = "<null>";
80733 + if (ZERO_OR_NULL_PTR(ptr))
80734 + goto report;
80735 +
80736 + if (!virt_addr_valid(ptr))
80737 + return;
80738 +
80739 + page = get_object_page(ptr);
80740 +
80741 + type = "<process stack>";
80742 + if (!page) {
80743 + if (object_is_on_stack(ptr, n) == -1)
80744 + goto report;
80745 + return;
80746 + }
80747 +
80748 + s = page->slab;
80749 + type = s->name;
80750 + if (!(s->flags & SLAB_USERCOPY))
80751 + goto report;
80752 +
80753 + offset = (ptr - page_address(page)) % s->size;
80754 + if (offset <= s->objsize && n <= s->objsize - offset)
80755 + return;
80756 +
80757 +report:
80758 + pax_report_usercopy(ptr, n, to, type);
80759 +#endif
80760 +
80761 +}
80762 +EXPORT_SYMBOL(check_object_size);
80763 +
80764 size_t ksize(const void *object)
80765 {
80766 struct page *page;
80767 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80768 * kmem_cache_open for slab_state == DOWN.
80769 */
80770 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80771 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
80772 - kmalloc_caches[0].refcount = -1;
80773 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80774 + atomic_set(&kmalloc_caches[0].refcount, -1);
80775 caches++;
80776
80777 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80778 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80779 /* Caches that are not of the two-to-the-power-of size */
80780 if (KMALLOC_MIN_SIZE <= 32) {
80781 create_kmalloc_cache(&kmalloc_caches[1],
80782 - "kmalloc-96", 96, GFP_NOWAIT);
80783 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80784 caches++;
80785 }
80786 if (KMALLOC_MIN_SIZE <= 64) {
80787 create_kmalloc_cache(&kmalloc_caches[2],
80788 - "kmalloc-192", 192, GFP_NOWAIT);
80789 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80790 caches++;
80791 }
80792
80793 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80794 create_kmalloc_cache(&kmalloc_caches[i],
80795 - "kmalloc", 1 << i, GFP_NOWAIT);
80796 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80797 caches++;
80798 }
80799
80800 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80801 /*
80802 * We may have set a slab to be unmergeable during bootstrap.
80803 */
80804 - if (s->refcount < 0)
80805 + if (atomic_read(&s->refcount) < 0)
80806 return 1;
80807
80808 return 0;
80809 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80810 if (s) {
80811 int cpu;
80812
80813 - s->refcount++;
80814 + atomic_inc(&s->refcount);
80815 /*
80816 * Adjust the object sizes so that we clear
80817 * the complete object on kzalloc.
80818 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80819
80820 if (sysfs_slab_alias(s, name)) {
80821 down_write(&slub_lock);
80822 - s->refcount--;
80823 + atomic_dec(&s->refcount);
80824 up_write(&slub_lock);
80825 goto err;
80826 }
80827 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80828
80829 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80830 {
80831 - return sprintf(buf, "%d\n", s->refcount - 1);
80832 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80833 }
80834 SLAB_ATTR_RO(aliases);
80835
80836 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80837 kfree(s);
80838 }
80839
80840 -static struct sysfs_ops slab_sysfs_ops = {
80841 +static const struct sysfs_ops slab_sysfs_ops = {
80842 .show = slab_attr_show,
80843 .store = slab_attr_store,
80844 };
80845 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80846 return 0;
80847 }
80848
80849 -static struct kset_uevent_ops slab_uevent_ops = {
80850 +static const struct kset_uevent_ops slab_uevent_ops = {
80851 .filter = uevent_filter,
80852 };
80853
80854 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80855 return name;
80856 }
80857
80858 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80859 static int sysfs_slab_add(struct kmem_cache *s)
80860 {
80861 int err;
80862 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80863 kobject_del(&s->kobj);
80864 kobject_put(&s->kobj);
80865 }
80866 +#endif
80867
80868 /*
80869 * Need to buffer aliases during bootup until sysfs becomes
80870 @@ -4632,6 +4677,7 @@ struct saved_alias {
80871
80872 static struct saved_alias *alias_list;
80873
80874 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80875 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80876 {
80877 struct saved_alias *al;
80878 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80879 alias_list = al;
80880 return 0;
80881 }
80882 +#endif
80883
80884 static int __init slab_sysfs_init(void)
80885 {
80886 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80887
80888 static int __init slab_proc_init(void)
80889 {
80890 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80891 + mode_t gr_mode = S_IRUGO;
80892 +
80893 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80894 + gr_mode = S_IRUSR;
80895 +#endif
80896 +
80897 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80898 return 0;
80899 }
80900 module_init(slab_proc_init);
80901 diff --git a/mm/swap.c b/mm/swap.c
80902 index 308e57d..5de19c0 100644
80903 --- a/mm/swap.c
80904 +++ b/mm/swap.c
80905 @@ -30,6 +30,7 @@
80906 #include <linux/notifier.h>
80907 #include <linux/backing-dev.h>
80908 #include <linux/memcontrol.h>
80909 +#include <linux/hugetlb.h>
80910
80911 #include "internal.h"
80912
80913 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80914 compound_page_dtor *dtor;
80915
80916 dtor = get_compound_page_dtor(page);
80917 + if (!PageHuge(page))
80918 + BUG_ON(dtor != free_compound_page);
80919 (*dtor)(page);
80920 }
80921 }
80922 diff --git a/mm/util.c b/mm/util.c
80923 index e48b493..24a601d 100644
80924 --- a/mm/util.c
80925 +++ b/mm/util.c
80926 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80927 void arch_pick_mmap_layout(struct mm_struct *mm)
80928 {
80929 mm->mmap_base = TASK_UNMAPPED_BASE;
80930 +
80931 +#ifdef CONFIG_PAX_RANDMMAP
80932 + if (mm->pax_flags & MF_PAX_RANDMMAP)
80933 + mm->mmap_base += mm->delta_mmap;
80934 +#endif
80935 +
80936 mm->get_unmapped_area = arch_get_unmapped_area;
80937 mm->unmap_area = arch_unmap_area;
80938 }
80939 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80940 index f34ffd0..e60c44f 100644
80941 --- a/mm/vmalloc.c
80942 +++ b/mm/vmalloc.c
80943 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80944
80945 pte = pte_offset_kernel(pmd, addr);
80946 do {
80947 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80948 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80949 +
80950 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80951 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80952 + BUG_ON(!pte_exec(*pte));
80953 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80954 + continue;
80955 + }
80956 +#endif
80957 +
80958 + {
80959 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80960 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80961 + }
80962 } while (pte++, addr += PAGE_SIZE, addr != end);
80963 }
80964
80965 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80966 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80967 {
80968 pte_t *pte;
80969 + int ret = -ENOMEM;
80970
80971 /*
80972 * nr is a running index into the array which helps higher level
80973 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80974 pte = pte_alloc_kernel(pmd, addr);
80975 if (!pte)
80976 return -ENOMEM;
80977 +
80978 + pax_open_kernel();
80979 do {
80980 struct page *page = pages[*nr];
80981
80982 - if (WARN_ON(!pte_none(*pte)))
80983 - return -EBUSY;
80984 - if (WARN_ON(!page))
80985 - return -ENOMEM;
80986 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80987 + if (!(pgprot_val(prot) & _PAGE_NX))
80988 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80989 + else
80990 +#endif
80991 +
80992 + if (WARN_ON(!pte_none(*pte))) {
80993 + ret = -EBUSY;
80994 + goto out;
80995 + }
80996 + if (WARN_ON(!page)) {
80997 + ret = -ENOMEM;
80998 + goto out;
80999 + }
81000 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
81001 (*nr)++;
81002 } while (pte++, addr += PAGE_SIZE, addr != end);
81003 - return 0;
81004 + ret = 0;
81005 +out:
81006 + pax_close_kernel();
81007 + return ret;
81008 }
81009
81010 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
81011 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
81012 * and fall back on vmalloc() if that fails. Others
81013 * just put it in the vmalloc space.
81014 */
81015 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
81016 +#ifdef CONFIG_MODULES
81017 +#ifdef MODULES_VADDR
81018 unsigned long addr = (unsigned long)x;
81019 if (addr >= MODULES_VADDR && addr < MODULES_END)
81020 return 1;
81021 #endif
81022 +
81023 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
81024 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
81025 + return 1;
81026 +#endif
81027 +
81028 +#endif
81029 +
81030 return is_vmalloc_addr(x);
81031 }
81032
81033 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
81034
81035 if (!pgd_none(*pgd)) {
81036 pud_t *pud = pud_offset(pgd, addr);
81037 +#ifdef CONFIG_X86
81038 + if (!pud_large(*pud))
81039 +#endif
81040 if (!pud_none(*pud)) {
81041 pmd_t *pmd = pmd_offset(pud, addr);
81042 +#ifdef CONFIG_X86
81043 + if (!pmd_large(*pmd))
81044 +#endif
81045 if (!pmd_none(*pmd)) {
81046 pte_t *ptep, pte;
81047
81048 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
81049 struct rb_node *tmp;
81050
81051 while (*p) {
81052 - struct vmap_area *tmp;
81053 + struct vmap_area *varea;
81054
81055 parent = *p;
81056 - tmp = rb_entry(parent, struct vmap_area, rb_node);
81057 - if (va->va_start < tmp->va_end)
81058 + varea = rb_entry(parent, struct vmap_area, rb_node);
81059 + if (va->va_start < varea->va_end)
81060 p = &(*p)->rb_left;
81061 - else if (va->va_end > tmp->va_start)
81062 + else if (va->va_end > varea->va_start)
81063 p = &(*p)->rb_right;
81064 else
81065 BUG();
81066 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
81067 struct vm_struct *area;
81068
81069 BUG_ON(in_interrupt());
81070 +
81071 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81072 + if (flags & VM_KERNEXEC) {
81073 + if (start != VMALLOC_START || end != VMALLOC_END)
81074 + return NULL;
81075 + start = (unsigned long)MODULES_EXEC_VADDR;
81076 + end = (unsigned long)MODULES_EXEC_END;
81077 + }
81078 +#endif
81079 +
81080 if (flags & VM_IOREMAP) {
81081 int bit = fls(size);
81082
81083 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
81084 if (count > totalram_pages)
81085 return NULL;
81086
81087 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81088 + if (!(pgprot_val(prot) & _PAGE_NX))
81089 + flags |= VM_KERNEXEC;
81090 +#endif
81091 +
81092 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
81093 __builtin_return_address(0));
81094 if (!area)
81095 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81096 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
81097 return NULL;
81098
81099 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81100 + if (!(pgprot_val(prot) & _PAGE_NX))
81101 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
81102 + VMALLOC_START, VMALLOC_END, node,
81103 + gfp_mask, caller);
81104 + else
81105 +#endif
81106 +
81107 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
81108 VMALLOC_START, VMALLOC_END, node,
81109 gfp_mask, caller);
81110 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81111 return addr;
81112 }
81113
81114 +#undef __vmalloc
81115 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
81116 {
81117 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
81118 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
81119 * For tight control over page level allocator and protection flags
81120 * use __vmalloc() instead.
81121 */
81122 +#undef vmalloc
81123 void *vmalloc(unsigned long size)
81124 {
81125 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81126 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
81127 * The resulting memory area is zeroed so it can be mapped to userspace
81128 * without leaking data.
81129 */
81130 +#undef vmalloc_user
81131 void *vmalloc_user(unsigned long size)
81132 {
81133 struct vm_struct *area;
81134 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
81135 * For tight control over page level allocator and protection flags
81136 * use __vmalloc() instead.
81137 */
81138 +#undef vmalloc_node
81139 void *vmalloc_node(unsigned long size, int node)
81140 {
81141 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81142 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
81143 * For tight control over page level allocator and protection flags
81144 * use __vmalloc() instead.
81145 */
81146 -
81147 +#undef vmalloc_exec
81148 void *vmalloc_exec(unsigned long size)
81149 {
81150 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
81151 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
81152 -1, __builtin_return_address(0));
81153 }
81154
81155 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
81156 * Allocate enough 32bit PA addressable pages to cover @size from the
81157 * page level allocator and map them into contiguous kernel virtual space.
81158 */
81159 +#undef vmalloc_32
81160 void *vmalloc_32(unsigned long size)
81161 {
81162 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
81163 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
81164 * The resulting memory area is 32bit addressable and zeroed so it can be
81165 * mapped to userspace without leaking data.
81166 */
81167 +#undef vmalloc_32_user
81168 void *vmalloc_32_user(unsigned long size)
81169 {
81170 struct vm_struct *area;
81171 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
81172 unsigned long uaddr = vma->vm_start;
81173 unsigned long usize = vma->vm_end - vma->vm_start;
81174
81175 + BUG_ON(vma->vm_mirror);
81176 +
81177 if ((PAGE_SIZE-1) & (unsigned long)addr)
81178 return -EINVAL;
81179
81180 diff --git a/mm/vmstat.c b/mm/vmstat.c
81181 index 42d76c6..5643dc4 100644
81182 --- a/mm/vmstat.c
81183 +++ b/mm/vmstat.c
81184 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
81185 *
81186 * vm_stat contains the global counters
81187 */
81188 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81189 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81190 EXPORT_SYMBOL(vm_stat);
81191
81192 #ifdef CONFIG_SMP
81193 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
81194 v = p->vm_stat_diff[i];
81195 p->vm_stat_diff[i] = 0;
81196 local_irq_restore(flags);
81197 - atomic_long_add(v, &zone->vm_stat[i]);
81198 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
81199 global_diff[i] += v;
81200 #ifdef CONFIG_NUMA
81201 /* 3 seconds idle till flush */
81202 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
81203
81204 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
81205 if (global_diff[i])
81206 - atomic_long_add(global_diff[i], &vm_stat[i]);
81207 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
81208 }
81209
81210 #endif
81211 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
81212 start_cpu_timer(cpu);
81213 #endif
81214 #ifdef CONFIG_PROC_FS
81215 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
81216 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
81217 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
81218 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
81219 + {
81220 + mode_t gr_mode = S_IRUGO;
81221 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
81222 + gr_mode = S_IRUSR;
81223 +#endif
81224 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
81225 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
81226 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
81227 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
81228 +#else
81229 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
81230 +#endif
81231 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
81232 + }
81233 #endif
81234 return 0;
81235 }
81236 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
81237 index a29c5ab..6143f20 100644
81238 --- a/net/8021q/vlan.c
81239 +++ b/net/8021q/vlan.c
81240 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
81241 err = -EPERM;
81242 if (!capable(CAP_NET_ADMIN))
81243 break;
81244 - if ((args.u.name_type >= 0) &&
81245 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
81246 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
81247 struct vlan_net *vn;
81248
81249 vn = net_generic(net, vlan_net_id);
81250 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
81251 index a2d2984..f9eb711 100644
81252 --- a/net/9p/trans_fd.c
81253 +++ b/net/9p/trans_fd.c
81254 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
81255 oldfs = get_fs();
81256 set_fs(get_ds());
81257 /* The cast to a user pointer is valid due to the set_fs() */
81258 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
81259 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
81260 set_fs(oldfs);
81261
81262 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
81263 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
81264 index 02cc7e7..4514f1b 100644
81265 --- a/net/atm/atm_misc.c
81266 +++ b/net/atm/atm_misc.c
81267 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
81268 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
81269 return 1;
81270 atm_return(vcc,truesize);
81271 - atomic_inc(&vcc->stats->rx_drop);
81272 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81273 return 0;
81274 }
81275
81276 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
81277 }
81278 }
81279 atm_return(vcc,guess);
81280 - atomic_inc(&vcc->stats->rx_drop);
81281 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81282 return NULL;
81283 }
81284
81285 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
81286
81287 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81288 {
81289 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81290 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81291 __SONET_ITEMS
81292 #undef __HANDLE_ITEM
81293 }
81294 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81295
81296 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81297 {
81298 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81299 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81300 __SONET_ITEMS
81301 #undef __HANDLE_ITEM
81302 }
81303 diff --git a/net/atm/lec.h b/net/atm/lec.h
81304 index 9d14d19..5c145f3 100644
81305 --- a/net/atm/lec.h
81306 +++ b/net/atm/lec.h
81307 @@ -48,7 +48,7 @@ struct lane2_ops {
81308 const u8 *tlvs, u32 sizeoftlvs);
81309 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81310 const u8 *tlvs, u32 sizeoftlvs);
81311 -};
81312 +} __no_const;
81313
81314 /*
81315 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81316 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81317 index 0919a88..a23d54e 100644
81318 --- a/net/atm/mpc.h
81319 +++ b/net/atm/mpc.h
81320 @@ -33,7 +33,7 @@ struct mpoa_client {
81321 struct mpc_parameters parameters; /* parameters for this client */
81322
81323 const struct net_device_ops *old_ops;
81324 - struct net_device_ops new_ops;
81325 + net_device_ops_no_const new_ops;
81326 };
81327
81328
81329 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81330 index 4504a4b..1733f1e 100644
81331 --- a/net/atm/mpoa_caches.c
81332 +++ b/net/atm/mpoa_caches.c
81333 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81334 struct timeval now;
81335 struct k_message msg;
81336
81337 + pax_track_stack();
81338 +
81339 do_gettimeofday(&now);
81340
81341 write_lock_irq(&client->egress_lock);
81342 diff --git a/net/atm/proc.c b/net/atm/proc.c
81343 index ab8419a..aa91497 100644
81344 --- a/net/atm/proc.c
81345 +++ b/net/atm/proc.c
81346 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81347 const struct k_atm_aal_stats *stats)
81348 {
81349 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81350 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81351 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81352 - atomic_read(&stats->rx_drop));
81353 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81354 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81355 + atomic_read_unchecked(&stats->rx_drop));
81356 }
81357
81358 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81359 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81360 {
81361 struct sock *sk = sk_atm(vcc);
81362
81363 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81364 + seq_printf(seq, "%p ", NULL);
81365 +#else
81366 seq_printf(seq, "%p ", vcc);
81367 +#endif
81368 +
81369 if (!vcc->dev)
81370 seq_printf(seq, "Unassigned ");
81371 else
81372 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81373 {
81374 if (!vcc->dev)
81375 seq_printf(seq, sizeof(void *) == 4 ?
81376 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81377 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81378 +#else
81379 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81380 +#endif
81381 else
81382 seq_printf(seq, "%3d %3d %5d ",
81383 vcc->dev->number, vcc->vpi, vcc->vci);
81384 diff --git a/net/atm/resources.c b/net/atm/resources.c
81385 index 56b7322..c48b84e 100644
81386 --- a/net/atm/resources.c
81387 +++ b/net/atm/resources.c
81388 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81389 static void copy_aal_stats(struct k_atm_aal_stats *from,
81390 struct atm_aal_stats *to)
81391 {
81392 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81393 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81394 __AAL_STAT_ITEMS
81395 #undef __HANDLE_ITEM
81396 }
81397 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81398 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81399 struct atm_aal_stats *to)
81400 {
81401 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81402 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81403 __AAL_STAT_ITEMS
81404 #undef __HANDLE_ITEM
81405 }
81406 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81407 index 8567d47..bba2292 100644
81408 --- a/net/bridge/br_private.h
81409 +++ b/net/bridge/br_private.h
81410 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81411
81412 #ifdef CONFIG_SYSFS
81413 /* br_sysfs_if.c */
81414 -extern struct sysfs_ops brport_sysfs_ops;
81415 +extern const struct sysfs_ops brport_sysfs_ops;
81416 extern int br_sysfs_addif(struct net_bridge_port *p);
81417
81418 /* br_sysfs_br.c */
81419 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81420 index 9a52ac5..c97538e 100644
81421 --- a/net/bridge/br_stp_if.c
81422 +++ b/net/bridge/br_stp_if.c
81423 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81424 char *envp[] = { NULL };
81425
81426 if (br->stp_enabled == BR_USER_STP) {
81427 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81428 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
81429 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
81430 br->dev->name, r);
81431
81432 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
81433 index 820643a..ce77fb3 100644
81434 --- a/net/bridge/br_sysfs_if.c
81435 +++ b/net/bridge/br_sysfs_if.c
81436 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
81437 return ret;
81438 }
81439
81440 -struct sysfs_ops brport_sysfs_ops = {
81441 +const struct sysfs_ops brport_sysfs_ops = {
81442 .show = brport_show,
81443 .store = brport_store,
81444 };
81445 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
81446 index d73d47f..72df42a 100644
81447 --- a/net/bridge/netfilter/ebtables.c
81448 +++ b/net/bridge/netfilter/ebtables.c
81449 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
81450 unsigned int entries_size, nentries;
81451 char *entries;
81452
81453 + pax_track_stack();
81454 +
81455 if (cmd == EBT_SO_GET_ENTRIES) {
81456 entries_size = t->private->entries_size;
81457 nentries = t->private->nentries;
81458 diff --git a/net/can/bcm.c b/net/can/bcm.c
81459 index 2ffd2e0..72a7486 100644
81460 --- a/net/can/bcm.c
81461 +++ b/net/can/bcm.c
81462 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
81463 struct bcm_sock *bo = bcm_sk(sk);
81464 struct bcm_op *op;
81465
81466 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81467 + seq_printf(m, ">>> socket %p", NULL);
81468 + seq_printf(m, " / sk %p", NULL);
81469 + seq_printf(m, " / bo %p", NULL);
81470 +#else
81471 seq_printf(m, ">>> socket %p", sk->sk_socket);
81472 seq_printf(m, " / sk %p", sk);
81473 seq_printf(m, " / bo %p", bo);
81474 +#endif
81475 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
81476 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
81477 seq_printf(m, " <<<\n");
81478 diff --git a/net/compat.c b/net/compat.c
81479 index 9559afc..ccd74e1 100644
81480 --- a/net/compat.c
81481 +++ b/net/compat.c
81482 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
81483 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
81484 __get_user(kmsg->msg_flags, &umsg->msg_flags))
81485 return -EFAULT;
81486 - kmsg->msg_name = compat_ptr(tmp1);
81487 - kmsg->msg_iov = compat_ptr(tmp2);
81488 - kmsg->msg_control = compat_ptr(tmp3);
81489 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
81490 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
81491 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
81492 return 0;
81493 }
81494
81495 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81496 kern_msg->msg_name = NULL;
81497
81498 tot_len = iov_from_user_compat_to_kern(kern_iov,
81499 - (struct compat_iovec __user *)kern_msg->msg_iov,
81500 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
81501 kern_msg->msg_iovlen);
81502 if (tot_len >= 0)
81503 kern_msg->msg_iov = kern_iov;
81504 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81505
81506 #define CMSG_COMPAT_FIRSTHDR(msg) \
81507 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
81508 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
81509 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
81510 (struct compat_cmsghdr __user *)NULL)
81511
81512 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
81513 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
81514 (ucmlen) <= (unsigned long) \
81515 ((mhdr)->msg_controllen - \
81516 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
81517 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
81518
81519 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
81520 struct compat_cmsghdr __user *cmsg, int cmsg_len)
81521 {
81522 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
81523 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
81524 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
81525 msg->msg_controllen)
81526 return NULL;
81527 return (struct compat_cmsghdr __user *)ptr;
81528 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81529 {
81530 struct compat_timeval ctv;
81531 struct compat_timespec cts[3];
81532 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81533 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81534 struct compat_cmsghdr cmhdr;
81535 int cmlen;
81536
81537 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81538
81539 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
81540 {
81541 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81542 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81543 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
81544 int fdnum = scm->fp->count;
81545 struct file **fp = scm->fp->fp;
81546 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
81547 len = sizeof(ktime);
81548 old_fs = get_fs();
81549 set_fs(KERNEL_DS);
81550 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
81551 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
81552 set_fs(old_fs);
81553
81554 if (!err) {
81555 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81556 case MCAST_JOIN_GROUP:
81557 case MCAST_LEAVE_GROUP:
81558 {
81559 - struct compat_group_req __user *gr32 = (void *)optval;
81560 + struct compat_group_req __user *gr32 = (void __user *)optval;
81561 struct group_req __user *kgr =
81562 compat_alloc_user_space(sizeof(struct group_req));
81563 u32 interface;
81564 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81565 case MCAST_BLOCK_SOURCE:
81566 case MCAST_UNBLOCK_SOURCE:
81567 {
81568 - struct compat_group_source_req __user *gsr32 = (void *)optval;
81569 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
81570 struct group_source_req __user *kgsr = compat_alloc_user_space(
81571 sizeof(struct group_source_req));
81572 u32 interface;
81573 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81574 }
81575 case MCAST_MSFILTER:
81576 {
81577 - struct compat_group_filter __user *gf32 = (void *)optval;
81578 + struct compat_group_filter __user *gf32 = (void __user *)optval;
81579 struct group_filter __user *kgf;
81580 u32 interface, fmode, numsrc;
81581
81582 diff --git a/net/core/dev.c b/net/core/dev.c
81583 index 84a0705..575db4c 100644
81584 --- a/net/core/dev.c
81585 +++ b/net/core/dev.c
81586 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
81587 if (no_module && capable(CAP_NET_ADMIN))
81588 no_module = request_module("netdev-%s", name);
81589 if (no_module && capable(CAP_SYS_MODULE)) {
81590 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
81591 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
81592 +#else
81593 if (!request_module("%s", name))
81594 pr_err("Loading kernel module for a network device "
81595 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
81596 "instead\n", name);
81597 +#endif
81598 }
81599 }
81600 EXPORT_SYMBOL(dev_load);
81601 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
81602
81603 struct dev_gso_cb {
81604 void (*destructor)(struct sk_buff *skb);
81605 -};
81606 +} __no_const;
81607
81608 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
81609
81610 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
81611 }
81612 EXPORT_SYMBOL(netif_rx_ni);
81613
81614 -static void net_tx_action(struct softirq_action *h)
81615 +static void net_tx_action(void)
81616 {
81617 struct softnet_data *sd = &__get_cpu_var(softnet_data);
81618
81619 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
81620 EXPORT_SYMBOL(netif_napi_del);
81621
81622
81623 -static void net_rx_action(struct softirq_action *h)
81624 +static void net_rx_action(void)
81625 {
81626 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
81627 unsigned long time_limit = jiffies + 2;
81628 diff --git a/net/core/flow.c b/net/core/flow.c
81629 index 9601587..8c4824e 100644
81630 --- a/net/core/flow.c
81631 +++ b/net/core/flow.c
81632 @@ -35,11 +35,11 @@ struct flow_cache_entry {
81633 atomic_t *object_ref;
81634 };
81635
81636 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
81637 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
81638
81639 static u32 flow_hash_shift;
81640 #define flow_hash_size (1 << flow_hash_shift)
81641 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81642 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81643
81644 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81645
81646 @@ -52,7 +52,7 @@ struct flow_percpu_info {
81647 u32 hash_rnd;
81648 int count;
81649 };
81650 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81651 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81652
81653 #define flow_hash_rnd_recalc(cpu) \
81654 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81655 @@ -69,7 +69,7 @@ struct flow_flush_info {
81656 atomic_t cpuleft;
81657 struct completion completion;
81658 };
81659 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81660 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81661
81662 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81663
81664 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81665 if (fle->family == family &&
81666 fle->dir == dir &&
81667 flow_key_compare(key, &fle->key) == 0) {
81668 - if (fle->genid == atomic_read(&flow_cache_genid)) {
81669 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81670 void *ret = fle->object;
81671
81672 if (ret)
81673 @@ -228,7 +228,7 @@ nocache:
81674 err = resolver(net, key, family, dir, &obj, &obj_ref);
81675
81676 if (fle && !err) {
81677 - fle->genid = atomic_read(&flow_cache_genid);
81678 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
81679
81680 if (fle->object)
81681 atomic_dec(fle->object_ref);
81682 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81683
81684 fle = flow_table(cpu)[i];
81685 for (; fle; fle = fle->next) {
81686 - unsigned genid = atomic_read(&flow_cache_genid);
81687 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81688
81689 if (!fle->object || fle->genid == genid)
81690 continue;
81691 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81692 index d4fd895..ac9b1e6 100644
81693 --- a/net/core/rtnetlink.c
81694 +++ b/net/core/rtnetlink.c
81695 @@ -57,7 +57,7 @@ struct rtnl_link
81696 {
81697 rtnl_doit_func doit;
81698 rtnl_dumpit_func dumpit;
81699 -};
81700 +} __no_const;
81701
81702 static DEFINE_MUTEX(rtnl_mutex);
81703
81704 diff --git a/net/core/scm.c b/net/core/scm.c
81705 index d98eafc..1a190a9 100644
81706 --- a/net/core/scm.c
81707 +++ b/net/core/scm.c
81708 @@ -191,7 +191,7 @@ error:
81709 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81710 {
81711 struct cmsghdr __user *cm
81712 - = (__force struct cmsghdr __user *)msg->msg_control;
81713 + = (struct cmsghdr __force_user *)msg->msg_control;
81714 struct cmsghdr cmhdr;
81715 int cmlen = CMSG_LEN(len);
81716 int err;
81717 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81718 err = -EFAULT;
81719 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81720 goto out;
81721 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81722 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81723 goto out;
81724 cmlen = CMSG_SPACE(len);
81725 if (msg->msg_controllen < cmlen)
81726 @@ -229,7 +229,7 @@ out:
81727 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81728 {
81729 struct cmsghdr __user *cm
81730 - = (__force struct cmsghdr __user*)msg->msg_control;
81731 + = (struct cmsghdr __force_user *)msg->msg_control;
81732
81733 int fdmax = 0;
81734 int fdnum = scm->fp->count;
81735 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81736 if (fdnum < fdmax)
81737 fdmax = fdnum;
81738
81739 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81740 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81741 i++, cmfptr++)
81742 {
81743 int new_fd;
81744 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81745 index 45329d7..626aaa6 100644
81746 --- a/net/core/secure_seq.c
81747 +++ b/net/core/secure_seq.c
81748 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81749 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81750
81751 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81752 - __be16 dport)
81753 + __be16 dport)
81754 {
81755 u32 secret[MD5_MESSAGE_BYTES / 4];
81756 u32 hash[MD5_DIGEST_WORDS];
81757 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81758 secret[i] = net_secret[i];
81759
81760 md5_transform(hash, secret);
81761 -
81762 return hash[0];
81763 }
81764 #endif
81765 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81766 index 025f924..70a71c4 100644
81767 --- a/net/core/skbuff.c
81768 +++ b/net/core/skbuff.c
81769 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81770 struct sk_buff *frag_iter;
81771 struct sock *sk = skb->sk;
81772
81773 + pax_track_stack();
81774 +
81775 /*
81776 * __skb_splice_bits() only fails if the output has no room left,
81777 * so no point in going over the frag_list for the error case.
81778 diff --git a/net/core/sock.c b/net/core/sock.c
81779 index 6605e75..3acebda 100644
81780 --- a/net/core/sock.c
81781 +++ b/net/core/sock.c
81782 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81783 break;
81784
81785 case SO_PEERCRED:
81786 + {
81787 + struct ucred peercred;
81788 if (len > sizeof(sk->sk_peercred))
81789 len = sizeof(sk->sk_peercred);
81790 - if (copy_to_user(optval, &sk->sk_peercred, len))
81791 + peercred = sk->sk_peercred;
81792 + if (copy_to_user(optval, &peercred, len))
81793 return -EFAULT;
81794 goto lenout;
81795 + }
81796
81797 case SO_PEERNAME:
81798 {
81799 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81800 */
81801 smp_wmb();
81802 atomic_set(&sk->sk_refcnt, 1);
81803 - atomic_set(&sk->sk_drops, 0);
81804 + atomic_set_unchecked(&sk->sk_drops, 0);
81805 }
81806 EXPORT_SYMBOL(sock_init_data);
81807
81808 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81809 index 2036568..c55883d 100644
81810 --- a/net/decnet/sysctl_net_decnet.c
81811 +++ b/net/decnet/sysctl_net_decnet.c
81812 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81813
81814 if (len > *lenp) len = *lenp;
81815
81816 - if (copy_to_user(buffer, addr, len))
81817 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
81818 return -EFAULT;
81819
81820 *lenp = len;
81821 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81822
81823 if (len > *lenp) len = *lenp;
81824
81825 - if (copy_to_user(buffer, devname, len))
81826 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
81827 return -EFAULT;
81828
81829 *lenp = len;
81830 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81831 index 39a2d29..f39c0fe 100644
81832 --- a/net/econet/Kconfig
81833 +++ b/net/econet/Kconfig
81834 @@ -4,7 +4,7 @@
81835
81836 config ECONET
81837 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81838 - depends on EXPERIMENTAL && INET
81839 + depends on EXPERIMENTAL && INET && BROKEN
81840 ---help---
81841 Econet is a fairly old and slow networking protocol mainly used by
81842 Acorn computers to access file and print servers. It uses native
81843 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81844 index a413b1b..380849c 100644
81845 --- a/net/ieee802154/dgram.c
81846 +++ b/net/ieee802154/dgram.c
81847 @@ -318,7 +318,7 @@ out:
81848 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81849 {
81850 if (sock_queue_rcv_skb(sk, skb) < 0) {
81851 - atomic_inc(&sk->sk_drops);
81852 + atomic_inc_unchecked(&sk->sk_drops);
81853 kfree_skb(skb);
81854 return NET_RX_DROP;
81855 }
81856 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81857 index 30e74ee..bfc6ee0 100644
81858 --- a/net/ieee802154/raw.c
81859 +++ b/net/ieee802154/raw.c
81860 @@ -206,7 +206,7 @@ out:
81861 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81862 {
81863 if (sock_queue_rcv_skb(sk, skb) < 0) {
81864 - atomic_inc(&sk->sk_drops);
81865 + atomic_inc_unchecked(&sk->sk_drops);
81866 kfree_skb(skb);
81867 return NET_RX_DROP;
81868 }
81869 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81870 index dba56d2..acee5d6 100644
81871 --- a/net/ipv4/inet_diag.c
81872 +++ b/net/ipv4/inet_diag.c
81873 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81874 r->idiag_retrans = 0;
81875
81876 r->id.idiag_if = sk->sk_bound_dev_if;
81877 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81878 + r->id.idiag_cookie[0] = 0;
81879 + r->id.idiag_cookie[1] = 0;
81880 +#else
81881 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81882 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81883 +#endif
81884
81885 r->id.idiag_sport = inet->sport;
81886 r->id.idiag_dport = inet->dport;
81887 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81888 r->idiag_family = tw->tw_family;
81889 r->idiag_retrans = 0;
81890 r->id.idiag_if = tw->tw_bound_dev_if;
81891 +
81892 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81893 + r->id.idiag_cookie[0] = 0;
81894 + r->id.idiag_cookie[1] = 0;
81895 +#else
81896 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81897 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81898 +#endif
81899 +
81900 r->id.idiag_sport = tw->tw_sport;
81901 r->id.idiag_dport = tw->tw_dport;
81902 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81903 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81904 if (sk == NULL)
81905 goto unlock;
81906
81907 +#ifndef CONFIG_GRKERNSEC_HIDESYM
81908 err = -ESTALE;
81909 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81910 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81911 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81912 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81913 goto out;
81914 +#endif
81915
81916 err = -ENOMEM;
81917 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81918 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81919 r->idiag_retrans = req->retrans;
81920
81921 r->id.idiag_if = sk->sk_bound_dev_if;
81922 +
81923 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81924 + r->id.idiag_cookie[0] = 0;
81925 + r->id.idiag_cookie[1] = 0;
81926 +#else
81927 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81928 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81929 +#endif
81930
81931 tmo = req->expires - jiffies;
81932 if (tmo < 0)
81933 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81934 index d717267..56de7e7 100644
81935 --- a/net/ipv4/inet_hashtables.c
81936 +++ b/net/ipv4/inet_hashtables.c
81937 @@ -18,12 +18,15 @@
81938 #include <linux/sched.h>
81939 #include <linux/slab.h>
81940 #include <linux/wait.h>
81941 +#include <linux/security.h>
81942
81943 #include <net/inet_connection_sock.h>
81944 #include <net/inet_hashtables.h>
81945 #include <net/secure_seq.h>
81946 #include <net/ip.h>
81947
81948 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81949 +
81950 /*
81951 * Allocate and initialize a new local port bind bucket.
81952 * The bindhash mutex for snum's hash chain must be held here.
81953 @@ -491,6 +494,8 @@ ok:
81954 }
81955 spin_unlock(&head->lock);
81956
81957 + gr_update_task_in_ip_table(current, inet_sk(sk));
81958 +
81959 if (tw) {
81960 inet_twsk_deschedule(tw, death_row);
81961 inet_twsk_put(tw);
81962 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81963 index 13b229f..6956484 100644
81964 --- a/net/ipv4/inetpeer.c
81965 +++ b/net/ipv4/inetpeer.c
81966 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81967 struct inet_peer *p, *n;
81968 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81969
81970 + pax_track_stack();
81971 +
81972 /* Look up for the address quickly. */
81973 read_lock_bh(&peer_pool_lock);
81974 p = lookup(daddr, NULL);
81975 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81976 return NULL;
81977 n->v4daddr = daddr;
81978 atomic_set(&n->refcnt, 1);
81979 - atomic_set(&n->rid, 0);
81980 + atomic_set_unchecked(&n->rid, 0);
81981 n->ip_id_count = secure_ip_id(daddr);
81982 n->tcp_ts_stamp = 0;
81983
81984 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81985 index d3fe10b..feeafc9 100644
81986 --- a/net/ipv4/ip_fragment.c
81987 +++ b/net/ipv4/ip_fragment.c
81988 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81989 return 0;
81990
81991 start = qp->rid;
81992 - end = atomic_inc_return(&peer->rid);
81993 + end = atomic_inc_return_unchecked(&peer->rid);
81994 qp->rid = end;
81995
81996 rc = qp->q.fragments && (end - start) > max;
81997 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81998 index e982b5c..f079d75 100644
81999 --- a/net/ipv4/ip_sockglue.c
82000 +++ b/net/ipv4/ip_sockglue.c
82001 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
82002 int val;
82003 int len;
82004
82005 + pax_track_stack();
82006 +
82007 if (level != SOL_IP)
82008 return -EOPNOTSUPP;
82009
82010 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
82011 if (sk->sk_type != SOCK_STREAM)
82012 return -ENOPROTOOPT;
82013
82014 - msg.msg_control = optval;
82015 + msg.msg_control = (void __force_kernel *)optval;
82016 msg.msg_controllen = len;
82017 msg.msg_flags = 0;
82018
82019 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
82020 index f8d04c2..c1188f2 100644
82021 --- a/net/ipv4/ipconfig.c
82022 +++ b/net/ipv4/ipconfig.c
82023 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
82024
82025 mm_segment_t oldfs = get_fs();
82026 set_fs(get_ds());
82027 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
82028 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
82029 set_fs(oldfs);
82030 return res;
82031 }
82032 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
82033
82034 mm_segment_t oldfs = get_fs();
82035 set_fs(get_ds());
82036 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
82037 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
82038 set_fs(oldfs);
82039 return res;
82040 }
82041 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
82042
82043 mm_segment_t oldfs = get_fs();
82044 set_fs(get_ds());
82045 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
82046 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
82047 set_fs(oldfs);
82048 return res;
82049 }
82050 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
82051 index c8b0cc3..4da5ae2 100644
82052 --- a/net/ipv4/netfilter/arp_tables.c
82053 +++ b/net/ipv4/netfilter/arp_tables.c
82054 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82055 private = &tmp;
82056 }
82057 #endif
82058 + memset(&info, 0, sizeof(info));
82059 info.valid_hooks = t->valid_hooks;
82060 memcpy(info.hook_entry, private->hook_entry,
82061 sizeof(info.hook_entry));
82062 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
82063 index c156db2..e772975 100644
82064 --- a/net/ipv4/netfilter/ip_queue.c
82065 +++ b/net/ipv4/netfilter/ip_queue.c
82066 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82067
82068 if (v->data_len < sizeof(*user_iph))
82069 return 0;
82070 + if (v->data_len > 65535)
82071 + return -EMSGSIZE;
82072 +
82073 diff = v->data_len - e->skb->len;
82074 if (diff < 0) {
82075 if (pskb_trim(e->skb, v->data_len))
82076 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
82077 static inline void
82078 __ipq_rcv_skb(struct sk_buff *skb)
82079 {
82080 - int status, type, pid, flags, nlmsglen, skblen;
82081 + int status, type, pid, flags;
82082 + unsigned int nlmsglen, skblen;
82083 struct nlmsghdr *nlh;
82084
82085 skblen = skb->len;
82086 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
82087 index 0606db1..02e7e4c 100644
82088 --- a/net/ipv4/netfilter/ip_tables.c
82089 +++ b/net/ipv4/netfilter/ip_tables.c
82090 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82091 private = &tmp;
82092 }
82093 #endif
82094 + memset(&info, 0, sizeof(info));
82095 info.valid_hooks = t->valid_hooks;
82096 memcpy(info.hook_entry, private->hook_entry,
82097 sizeof(info.hook_entry));
82098 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82099 index d9521f6..3c3eb25 100644
82100 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
82101 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82102 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
82103
82104 *len = 0;
82105
82106 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
82107 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
82108 if (*octets == NULL) {
82109 if (net_ratelimit())
82110 printk("OOM in bsalg (%d)\n", __LINE__);
82111 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
82112 index ab996f9..3da5f96 100644
82113 --- a/net/ipv4/raw.c
82114 +++ b/net/ipv4/raw.c
82115 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82116 /* Charge it to the socket. */
82117
82118 if (sock_queue_rcv_skb(sk, skb) < 0) {
82119 - atomic_inc(&sk->sk_drops);
82120 + atomic_inc_unchecked(&sk->sk_drops);
82121 kfree_skb(skb);
82122 return NET_RX_DROP;
82123 }
82124 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82125 int raw_rcv(struct sock *sk, struct sk_buff *skb)
82126 {
82127 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
82128 - atomic_inc(&sk->sk_drops);
82129 + atomic_inc_unchecked(&sk->sk_drops);
82130 kfree_skb(skb);
82131 return NET_RX_DROP;
82132 }
82133 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
82134
82135 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
82136 {
82137 + struct icmp_filter filter;
82138 +
82139 + if (optlen < 0)
82140 + return -EINVAL;
82141 if (optlen > sizeof(struct icmp_filter))
82142 optlen = sizeof(struct icmp_filter);
82143 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
82144 + if (copy_from_user(&filter, optval, optlen))
82145 return -EFAULT;
82146 + raw_sk(sk)->filter = filter;
82147 +
82148 return 0;
82149 }
82150
82151 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
82152 {
82153 int len, ret = -EFAULT;
82154 + struct icmp_filter filter;
82155
82156 if (get_user(len, optlen))
82157 goto out;
82158 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
82159 if (len > sizeof(struct icmp_filter))
82160 len = sizeof(struct icmp_filter);
82161 ret = -EFAULT;
82162 - if (put_user(len, optlen) ||
82163 - copy_to_user(optval, &raw_sk(sk)->filter, len))
82164 + filter = raw_sk(sk)->filter;
82165 + if (put_user(len, optlen) || len > sizeof filter ||
82166 + copy_to_user(optval, &filter, len))
82167 goto out;
82168 ret = 0;
82169 out: return ret;
82170 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82171 sk_wmem_alloc_get(sp),
82172 sk_rmem_alloc_get(sp),
82173 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82174 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82175 + atomic_read(&sp->sk_refcnt),
82176 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82177 + NULL,
82178 +#else
82179 + sp,
82180 +#endif
82181 + atomic_read_unchecked(&sp->sk_drops));
82182 }
82183
82184 static int raw_seq_show(struct seq_file *seq, void *v)
82185 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
82186 index 58f141b..b759702 100644
82187 --- a/net/ipv4/route.c
82188 +++ b/net/ipv4/route.c
82189 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
82190
82191 static inline int rt_genid(struct net *net)
82192 {
82193 - return atomic_read(&net->ipv4.rt_genid);
82194 + return atomic_read_unchecked(&net->ipv4.rt_genid);
82195 }
82196
82197 #ifdef CONFIG_PROC_FS
82198 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
82199 unsigned char shuffle;
82200
82201 get_random_bytes(&shuffle, sizeof(shuffle));
82202 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
82203 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
82204 }
82205
82206 /*
82207 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
82208
82209 static __net_init int rt_secret_timer_init(struct net *net)
82210 {
82211 - atomic_set(&net->ipv4.rt_genid,
82212 + atomic_set_unchecked(&net->ipv4.rt_genid,
82213 (int) ((num_physpages ^ (num_physpages>>8)) ^
82214 (jiffies ^ (jiffies >> 7))));
82215
82216 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
82217 index f095659..adc892a 100644
82218 --- a/net/ipv4/tcp.c
82219 +++ b/net/ipv4/tcp.c
82220 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
82221 int val;
82222 int err = 0;
82223
82224 + pax_track_stack();
82225 +
82226 /* This is a string value all the others are int's */
82227 if (optname == TCP_CONGESTION) {
82228 char name[TCP_CA_NAME_MAX];
82229 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
82230 struct tcp_sock *tp = tcp_sk(sk);
82231 int val, len;
82232
82233 + pax_track_stack();
82234 +
82235 if (get_user(len, optlen))
82236 return -EFAULT;
82237
82238 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
82239 index 6fc7961..33bad4a 100644
82240 --- a/net/ipv4/tcp_ipv4.c
82241 +++ b/net/ipv4/tcp_ipv4.c
82242 @@ -85,6 +85,9 @@
82243 int sysctl_tcp_tw_reuse __read_mostly;
82244 int sysctl_tcp_low_latency __read_mostly;
82245
82246 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82247 +extern int grsec_enable_blackhole;
82248 +#endif
82249
82250 #ifdef CONFIG_TCP_MD5SIG
82251 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
82252 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
82253 return 0;
82254
82255 reset:
82256 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82257 + if (!grsec_enable_blackhole)
82258 +#endif
82259 tcp_v4_send_reset(rsk, skb);
82260 discard:
82261 kfree_skb(skb);
82262 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
82263 TCP_SKB_CB(skb)->sacked = 0;
82264
82265 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82266 - if (!sk)
82267 + if (!sk) {
82268 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82269 + ret = 1;
82270 +#endif
82271 goto no_tcp_socket;
82272 + }
82273
82274 process:
82275 - if (sk->sk_state == TCP_TIME_WAIT)
82276 + if (sk->sk_state == TCP_TIME_WAIT) {
82277 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82278 + ret = 2;
82279 +#endif
82280 goto do_time_wait;
82281 + }
82282
82283 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
82284 goto discard_and_relse;
82285 @@ -1651,6 +1665,10 @@ no_tcp_socket:
82286 bad_packet:
82287 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82288 } else {
82289 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82290 + if (!grsec_enable_blackhole || (ret == 1 &&
82291 + (skb->dev->flags & IFF_LOOPBACK)))
82292 +#endif
82293 tcp_v4_send_reset(NULL, skb);
82294 }
82295
82296 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82297 0, /* non standard timer */
82298 0, /* open_requests have no inode */
82299 atomic_read(&sk->sk_refcnt),
82300 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82301 + NULL,
82302 +#else
82303 req,
82304 +#endif
82305 len);
82306 }
82307
82308 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82309 sock_i_uid(sk),
82310 icsk->icsk_probes_out,
82311 sock_i_ino(sk),
82312 - atomic_read(&sk->sk_refcnt), sk,
82313 + atomic_read(&sk->sk_refcnt),
82314 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82315 + NULL,
82316 +#else
82317 + sk,
82318 +#endif
82319 jiffies_to_clock_t(icsk->icsk_rto),
82320 jiffies_to_clock_t(icsk->icsk_ack.ato),
82321 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82322 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82323 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82324 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82325 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82326 - atomic_read(&tw->tw_refcnt), tw, len);
82327 + atomic_read(&tw->tw_refcnt),
82328 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82329 + NULL,
82330 +#else
82331 + tw,
82332 +#endif
82333 + len);
82334 }
82335
82336 #define TMPSZ 150
82337 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82338 index 4c03598..e09a8e8 100644
82339 --- a/net/ipv4/tcp_minisocks.c
82340 +++ b/net/ipv4/tcp_minisocks.c
82341 @@ -26,6 +26,10 @@
82342 #include <net/inet_common.h>
82343 #include <net/xfrm.h>
82344
82345 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82346 +extern int grsec_enable_blackhole;
82347 +#endif
82348 +
82349 #ifdef CONFIG_SYSCTL
82350 #define SYNC_INIT 0 /* let the user enable it */
82351 #else
82352 @@ -672,6 +676,10 @@ listen_overflow:
82353
82354 embryonic_reset:
82355 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82356 +
82357 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82358 + if (!grsec_enable_blackhole)
82359 +#endif
82360 if (!(flg & TCP_FLAG_RST))
82361 req->rsk_ops->send_reset(sk, skb);
82362
82363 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82364 index af83bdf..ec91cb2 100644
82365 --- a/net/ipv4/tcp_output.c
82366 +++ b/net/ipv4/tcp_output.c
82367 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82368 __u8 *md5_hash_location;
82369 int mss;
82370
82371 + pax_track_stack();
82372 +
82373 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82374 if (skb == NULL)
82375 return NULL;
82376 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82377 index 59f5b5e..193860f 100644
82378 --- a/net/ipv4/tcp_probe.c
82379 +++ b/net/ipv4/tcp_probe.c
82380 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82381 if (cnt + width >= len)
82382 break;
82383
82384 - if (copy_to_user(buf + cnt, tbuf, width))
82385 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82386 return -EFAULT;
82387 cnt += width;
82388 }
82389 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82390 index 57d5501..a9ed13a 100644
82391 --- a/net/ipv4/tcp_timer.c
82392 +++ b/net/ipv4/tcp_timer.c
82393 @@ -21,6 +21,10 @@
82394 #include <linux/module.h>
82395 #include <net/tcp.h>
82396
82397 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82398 +extern int grsec_lastack_retries;
82399 +#endif
82400 +
82401 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82402 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82403 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82404 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82405 }
82406 }
82407
82408 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82409 + if ((sk->sk_state == TCP_LAST_ACK) &&
82410 + (grsec_lastack_retries > 0) &&
82411 + (grsec_lastack_retries < retry_until))
82412 + retry_until = grsec_lastack_retries;
82413 +#endif
82414 +
82415 if (retransmits_timed_out(sk, retry_until)) {
82416 /* Has it gone just too far? */
82417 tcp_write_err(sk);
82418 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82419 index 8e28770..72105c8 100644
82420 --- a/net/ipv4/udp.c
82421 +++ b/net/ipv4/udp.c
82422 @@ -86,6 +86,7 @@
82423 #include <linux/types.h>
82424 #include <linux/fcntl.h>
82425 #include <linux/module.h>
82426 +#include <linux/security.h>
82427 #include <linux/socket.h>
82428 #include <linux/sockios.h>
82429 #include <linux/igmp.h>
82430 @@ -106,6 +107,10 @@
82431 #include <net/xfrm.h>
82432 #include "udp_impl.h"
82433
82434 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82435 +extern int grsec_enable_blackhole;
82436 +#endif
82437 +
82438 struct udp_table udp_table;
82439 EXPORT_SYMBOL(udp_table);
82440
82441 @@ -371,6 +376,9 @@ found:
82442 return s;
82443 }
82444
82445 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
82446 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
82447 +
82448 /*
82449 * This routine is called by the ICMP module when it gets some
82450 * sort of error condition. If err < 0 then the socket should
82451 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
82452 dport = usin->sin_port;
82453 if (dport == 0)
82454 return -EINVAL;
82455 +
82456 + err = gr_search_udp_sendmsg(sk, usin);
82457 + if (err)
82458 + return err;
82459 } else {
82460 if (sk->sk_state != TCP_ESTABLISHED)
82461 return -EDESTADDRREQ;
82462 +
82463 + err = gr_search_udp_sendmsg(sk, NULL);
82464 + if (err)
82465 + return err;
82466 +
82467 daddr = inet->daddr;
82468 dport = inet->dport;
82469 /* Open fast path for connected socket.
82470 @@ -945,6 +962,10 @@ try_again:
82471 if (!skb)
82472 goto out;
82473
82474 + err = gr_search_udp_recvmsg(sk, skb);
82475 + if (err)
82476 + goto out_free;
82477 +
82478 ulen = skb->len - sizeof(struct udphdr);
82479 copied = len;
82480 if (copied > ulen)
82481 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
82482 if (rc == -ENOMEM) {
82483 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
82484 is_udplite);
82485 - atomic_inc(&sk->sk_drops);
82486 + atomic_inc_unchecked(&sk->sk_drops);
82487 }
82488 goto drop;
82489 }
82490 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82491 goto csum_error;
82492
82493 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
82494 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82495 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82496 +#endif
82497 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
82498
82499 /*
82500 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
82501 sk_wmem_alloc_get(sp),
82502 sk_rmem_alloc_get(sp),
82503 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82504 - atomic_read(&sp->sk_refcnt), sp,
82505 - atomic_read(&sp->sk_drops), len);
82506 + atomic_read(&sp->sk_refcnt),
82507 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82508 + NULL,
82509 +#else
82510 + sp,
82511 +#endif
82512 + atomic_read_unchecked(&sp->sk_drops), len);
82513 }
82514
82515 int udp4_seq_show(struct seq_file *seq, void *v)
82516 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
82517 index 8ac3d09..fc58c5f 100644
82518 --- a/net/ipv6/addrconf.c
82519 +++ b/net/ipv6/addrconf.c
82520 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
82521 p.iph.ihl = 5;
82522 p.iph.protocol = IPPROTO_IPV6;
82523 p.iph.ttl = 64;
82524 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
82525 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
82526
82527 if (ops->ndo_do_ioctl) {
82528 mm_segment_t oldfs = get_fs();
82529 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
82530 index cc4797d..7cfdfcc 100644
82531 --- a/net/ipv6/inet6_connection_sock.c
82532 +++ b/net/ipv6/inet6_connection_sock.c
82533 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
82534 #ifdef CONFIG_XFRM
82535 {
82536 struct rt6_info *rt = (struct rt6_info *)dst;
82537 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
82538 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
82539 }
82540 #endif
82541 }
82542 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
82543 #ifdef CONFIG_XFRM
82544 if (dst) {
82545 struct rt6_info *rt = (struct rt6_info *)dst;
82546 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
82547 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
82548 sk->sk_dst_cache = NULL;
82549 dst_release(dst);
82550 dst = NULL;
82551 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
82552 index 093e9b2..f72cddb 100644
82553 --- a/net/ipv6/inet6_hashtables.c
82554 +++ b/net/ipv6/inet6_hashtables.c
82555 @@ -119,7 +119,7 @@ out:
82556 }
82557 EXPORT_SYMBOL(__inet6_lookup_established);
82558
82559 -static int inline compute_score(struct sock *sk, struct net *net,
82560 +static inline int compute_score(struct sock *sk, struct net *net,
82561 const unsigned short hnum,
82562 const struct in6_addr *daddr,
82563 const int dif)
82564 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
82565 index 4f7aaf6..f7acf45 100644
82566 --- a/net/ipv6/ipv6_sockglue.c
82567 +++ b/net/ipv6/ipv6_sockglue.c
82568 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
82569 int val, valbool;
82570 int retv = -ENOPROTOOPT;
82571
82572 + pax_track_stack();
82573 +
82574 if (optval == NULL)
82575 val=0;
82576 else {
82577 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82578 int len;
82579 int val;
82580
82581 + pax_track_stack();
82582 +
82583 if (ip6_mroute_opt(optname))
82584 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
82585
82586 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82587 if (sk->sk_type != SOCK_STREAM)
82588 return -ENOPROTOOPT;
82589
82590 - msg.msg_control = optval;
82591 + msg.msg_control = (void __force_kernel *)optval;
82592 msg.msg_controllen = len;
82593 msg.msg_flags = 0;
82594
82595 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
82596 index 1cf3f0c..1d4376f 100644
82597 --- a/net/ipv6/netfilter/ip6_queue.c
82598 +++ b/net/ipv6/netfilter/ip6_queue.c
82599 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82600
82601 if (v->data_len < sizeof(*user_iph))
82602 return 0;
82603 + if (v->data_len > 65535)
82604 + return -EMSGSIZE;
82605 +
82606 diff = v->data_len - e->skb->len;
82607 if (diff < 0) {
82608 if (pskb_trim(e->skb, v->data_len))
82609 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
82610 static inline void
82611 __ipq_rcv_skb(struct sk_buff *skb)
82612 {
82613 - int status, type, pid, flags, nlmsglen, skblen;
82614 + int status, type, pid, flags;
82615 + unsigned int nlmsglen, skblen;
82616 struct nlmsghdr *nlh;
82617
82618 skblen = skb->len;
82619 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
82620 index 78b5a36..7f37433 100644
82621 --- a/net/ipv6/netfilter/ip6_tables.c
82622 +++ b/net/ipv6/netfilter/ip6_tables.c
82623 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82624 private = &tmp;
82625 }
82626 #endif
82627 + memset(&info, 0, sizeof(info));
82628 info.valid_hooks = t->valid_hooks;
82629 memcpy(info.hook_entry, private->hook_entry,
82630 sizeof(info.hook_entry));
82631 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
82632 index 4f24570..b813b34 100644
82633 --- a/net/ipv6/raw.c
82634 +++ b/net/ipv6/raw.c
82635 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
82636 {
82637 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
82638 skb_checksum_complete(skb)) {
82639 - atomic_inc(&sk->sk_drops);
82640 + atomic_inc_unchecked(&sk->sk_drops);
82641 kfree_skb(skb);
82642 return NET_RX_DROP;
82643 }
82644
82645 /* Charge it to the socket. */
82646 if (sock_queue_rcv_skb(sk,skb)<0) {
82647 - atomic_inc(&sk->sk_drops);
82648 + atomic_inc_unchecked(&sk->sk_drops);
82649 kfree_skb(skb);
82650 return NET_RX_DROP;
82651 }
82652 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82653 struct raw6_sock *rp = raw6_sk(sk);
82654
82655 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82656 - atomic_inc(&sk->sk_drops);
82657 + atomic_inc_unchecked(&sk->sk_drops);
82658 kfree_skb(skb);
82659 return NET_RX_DROP;
82660 }
82661 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82662
82663 if (inet->hdrincl) {
82664 if (skb_checksum_complete(skb)) {
82665 - atomic_inc(&sk->sk_drops);
82666 + atomic_inc_unchecked(&sk->sk_drops);
82667 kfree_skb(skb);
82668 return NET_RX_DROP;
82669 }
82670 @@ -518,7 +518,7 @@ csum_copy_err:
82671 as some normal condition.
82672 */
82673 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82674 - atomic_inc(&sk->sk_drops);
82675 + atomic_inc_unchecked(&sk->sk_drops);
82676 goto out;
82677 }
82678
82679 @@ -600,7 +600,7 @@ out:
82680 return err;
82681 }
82682
82683 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82684 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82685 struct flowi *fl, struct rt6_info *rt,
82686 unsigned int flags)
82687 {
82688 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82689 u16 proto;
82690 int err;
82691
82692 + pax_track_stack();
82693 +
82694 /* Rough check on arithmetic overflow,
82695 better check is made in ip6_append_data().
82696 */
82697 @@ -916,12 +918,17 @@ do_confirm:
82698 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82699 char __user *optval, int optlen)
82700 {
82701 + struct icmp6_filter filter;
82702 +
82703 switch (optname) {
82704 case ICMPV6_FILTER:
82705 + if (optlen < 0)
82706 + return -EINVAL;
82707 if (optlen > sizeof(struct icmp6_filter))
82708 optlen = sizeof(struct icmp6_filter);
82709 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82710 + if (copy_from_user(&filter, optval, optlen))
82711 return -EFAULT;
82712 + raw6_sk(sk)->filter = filter;
82713 return 0;
82714 default:
82715 return -ENOPROTOOPT;
82716 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82717 char __user *optval, int __user *optlen)
82718 {
82719 int len;
82720 + struct icmp6_filter filter;
82721
82722 switch (optname) {
82723 case ICMPV6_FILTER:
82724 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82725 len = sizeof(struct icmp6_filter);
82726 if (put_user(len, optlen))
82727 return -EFAULT;
82728 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82729 + filter = raw6_sk(sk)->filter;
82730 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
82731 return -EFAULT;
82732 return 0;
82733 default:
82734 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82735 0, 0L, 0,
82736 sock_i_uid(sp), 0,
82737 sock_i_ino(sp),
82738 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82739 + atomic_read(&sp->sk_refcnt),
82740 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82741 + NULL,
82742 +#else
82743 + sp,
82744 +#endif
82745 + atomic_read_unchecked(&sp->sk_drops));
82746 }
82747
82748 static int raw6_seq_show(struct seq_file *seq, void *v)
82749 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82750 index faae6df..d4430c1 100644
82751 --- a/net/ipv6/tcp_ipv6.c
82752 +++ b/net/ipv6/tcp_ipv6.c
82753 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82754 }
82755 #endif
82756
82757 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82758 +extern int grsec_enable_blackhole;
82759 +#endif
82760 +
82761 static void tcp_v6_hash(struct sock *sk)
82762 {
82763 if (sk->sk_state != TCP_CLOSE) {
82764 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82765 return 0;
82766
82767 reset:
82768 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82769 + if (!grsec_enable_blackhole)
82770 +#endif
82771 tcp_v6_send_reset(sk, skb);
82772 discard:
82773 if (opt_skb)
82774 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82775 TCP_SKB_CB(skb)->sacked = 0;
82776
82777 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82778 - if (!sk)
82779 + if (!sk) {
82780 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82781 + ret = 1;
82782 +#endif
82783 goto no_tcp_socket;
82784 + }
82785
82786 process:
82787 - if (sk->sk_state == TCP_TIME_WAIT)
82788 + if (sk->sk_state == TCP_TIME_WAIT) {
82789 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82790 + ret = 2;
82791 +#endif
82792 goto do_time_wait;
82793 + }
82794
82795 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82796 goto discard_and_relse;
82797 @@ -1701,6 +1716,10 @@ no_tcp_socket:
82798 bad_packet:
82799 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82800 } else {
82801 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82802 + if (!grsec_enable_blackhole || (ret == 1 &&
82803 + (skb->dev->flags & IFF_LOOPBACK)))
82804 +#endif
82805 tcp_v6_send_reset(NULL, skb);
82806 }
82807
82808 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82809 uid,
82810 0, /* non standard timer */
82811 0, /* open_requests have no inode */
82812 - 0, req);
82813 + 0,
82814 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82815 + NULL
82816 +#else
82817 + req
82818 +#endif
82819 + );
82820 }
82821
82822 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82823 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82824 sock_i_uid(sp),
82825 icsk->icsk_probes_out,
82826 sock_i_ino(sp),
82827 - atomic_read(&sp->sk_refcnt), sp,
82828 + atomic_read(&sp->sk_refcnt),
82829 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82830 + NULL,
82831 +#else
82832 + sp,
82833 +#endif
82834 jiffies_to_clock_t(icsk->icsk_rto),
82835 jiffies_to_clock_t(icsk->icsk_ack.ato),
82836 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82837 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82838 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82839 tw->tw_substate, 0, 0,
82840 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82841 - atomic_read(&tw->tw_refcnt), tw);
82842 + atomic_read(&tw->tw_refcnt),
82843 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82844 + NULL
82845 +#else
82846 + tw
82847 +#endif
82848 + );
82849 }
82850
82851 static int tcp6_seq_show(struct seq_file *seq, void *v)
82852 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82853 index 9cc6289..052c521 100644
82854 --- a/net/ipv6/udp.c
82855 +++ b/net/ipv6/udp.c
82856 @@ -49,6 +49,10 @@
82857 #include <linux/seq_file.h>
82858 #include "udp_impl.h"
82859
82860 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82861 +extern int grsec_enable_blackhole;
82862 +#endif
82863 +
82864 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82865 {
82866 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82867 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82868 if (rc == -ENOMEM) {
82869 UDP6_INC_STATS_BH(sock_net(sk),
82870 UDP_MIB_RCVBUFERRORS, is_udplite);
82871 - atomic_inc(&sk->sk_drops);
82872 + atomic_inc_unchecked(&sk->sk_drops);
82873 }
82874 goto drop;
82875 }
82876 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82877 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82878 proto == IPPROTO_UDPLITE);
82879
82880 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82881 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82882 +#endif
82883 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82884
82885 kfree_skb(skb);
82886 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82887 0, 0L, 0,
82888 sock_i_uid(sp), 0,
82889 sock_i_ino(sp),
82890 - atomic_read(&sp->sk_refcnt), sp,
82891 - atomic_read(&sp->sk_drops));
82892 + atomic_read(&sp->sk_refcnt),
82893 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82894 + NULL,
82895 +#else
82896 + sp,
82897 +#endif
82898 + atomic_read_unchecked(&sp->sk_drops));
82899 }
82900
82901 int udp6_seq_show(struct seq_file *seq, void *v)
82902 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82903 index 811984d..11f59b7 100644
82904 --- a/net/irda/ircomm/ircomm_tty.c
82905 +++ b/net/irda/ircomm/ircomm_tty.c
82906 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82907 add_wait_queue(&self->open_wait, &wait);
82908
82909 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82910 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82911 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82912
82913 /* As far as I can see, we protect open_count - Jean II */
82914 spin_lock_irqsave(&self->spinlock, flags);
82915 if (!tty_hung_up_p(filp)) {
82916 extra_count = 1;
82917 - self->open_count--;
82918 + local_dec(&self->open_count);
82919 }
82920 spin_unlock_irqrestore(&self->spinlock, flags);
82921 - self->blocked_open++;
82922 + local_inc(&self->blocked_open);
82923
82924 while (1) {
82925 if (tty->termios->c_cflag & CBAUD) {
82926 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82927 }
82928
82929 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82930 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82931 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82932
82933 schedule();
82934 }
82935 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82936 if (extra_count) {
82937 /* ++ is not atomic, so this should be protected - Jean II */
82938 spin_lock_irqsave(&self->spinlock, flags);
82939 - self->open_count++;
82940 + local_inc(&self->open_count);
82941 spin_unlock_irqrestore(&self->spinlock, flags);
82942 }
82943 - self->blocked_open--;
82944 + local_dec(&self->blocked_open);
82945
82946 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82947 - __FILE__,__LINE__, tty->driver->name, self->open_count);
82948 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82949
82950 if (!retval)
82951 self->flags |= ASYNC_NORMAL_ACTIVE;
82952 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82953 }
82954 /* ++ is not atomic, so this should be protected - Jean II */
82955 spin_lock_irqsave(&self->spinlock, flags);
82956 - self->open_count++;
82957 + local_inc(&self->open_count);
82958
82959 tty->driver_data = self;
82960 self->tty = tty;
82961 spin_unlock_irqrestore(&self->spinlock, flags);
82962
82963 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82964 - self->line, self->open_count);
82965 + self->line, local_read(&self->open_count));
82966
82967 /* Not really used by us, but lets do it anyway */
82968 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82969 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82970 return;
82971 }
82972
82973 - if ((tty->count == 1) && (self->open_count != 1)) {
82974 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82975 /*
82976 * Uh, oh. tty->count is 1, which means that the tty
82977 * structure will be freed. state->count should always
82978 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82979 */
82980 IRDA_DEBUG(0, "%s(), bad serial port count; "
82981 "tty->count is 1, state->count is %d\n", __func__ ,
82982 - self->open_count);
82983 - self->open_count = 1;
82984 + local_read(&self->open_count));
82985 + local_set(&self->open_count, 1);
82986 }
82987
82988 - if (--self->open_count < 0) {
82989 + if (local_dec_return(&self->open_count) < 0) {
82990 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82991 - __func__, self->line, self->open_count);
82992 - self->open_count = 0;
82993 + __func__, self->line, local_read(&self->open_count));
82994 + local_set(&self->open_count, 0);
82995 }
82996 - if (self->open_count) {
82997 + if (local_read(&self->open_count)) {
82998 spin_unlock_irqrestore(&self->spinlock, flags);
82999
83000 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
83001 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
83002 tty->closing = 0;
83003 self->tty = NULL;
83004
83005 - if (self->blocked_open) {
83006 + if (local_read(&self->blocked_open)) {
83007 if (self->close_delay)
83008 schedule_timeout_interruptible(self->close_delay);
83009 wake_up_interruptible(&self->open_wait);
83010 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
83011 spin_lock_irqsave(&self->spinlock, flags);
83012 self->flags &= ~ASYNC_NORMAL_ACTIVE;
83013 self->tty = NULL;
83014 - self->open_count = 0;
83015 + local_set(&self->open_count, 0);
83016 spin_unlock_irqrestore(&self->spinlock, flags);
83017
83018 wake_up_interruptible(&self->open_wait);
83019 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
83020 seq_putc(m, '\n');
83021
83022 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
83023 - seq_printf(m, "Open count: %d\n", self->open_count);
83024 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
83025 seq_printf(m, "Max data size: %d\n", self->max_data_size);
83026 seq_printf(m, "Max header size: %d\n", self->max_header_size);
83027
83028 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
83029 index bada1b9..f325943 100644
83030 --- a/net/iucv/af_iucv.c
83031 +++ b/net/iucv/af_iucv.c
83032 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
83033
83034 write_lock_bh(&iucv_sk_list.lock);
83035
83036 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
83037 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
83038 while (__iucv_get_sock_by_name(name)) {
83039 sprintf(name, "%08x",
83040 - atomic_inc_return(&iucv_sk_list.autobind_name));
83041 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
83042 }
83043
83044 write_unlock_bh(&iucv_sk_list.lock);
83045 diff --git a/net/key/af_key.c b/net/key/af_key.c
83046 index 4e98193..439b449 100644
83047 --- a/net/key/af_key.c
83048 +++ b/net/key/af_key.c
83049 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
83050 struct xfrm_migrate m[XFRM_MAX_DEPTH];
83051 struct xfrm_kmaddress k;
83052
83053 + pax_track_stack();
83054 +
83055 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
83056 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
83057 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
83058 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
83059 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
83060 else
83061 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
83062 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83063 + NULL,
83064 +#else
83065 s,
83066 +#endif
83067 atomic_read(&s->sk_refcnt),
83068 sk_rmem_alloc_get(s),
83069 sk_wmem_alloc_get(s),
83070 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
83071 index bda96d1..c038b72 100644
83072 --- a/net/lapb/lapb_iface.c
83073 +++ b/net/lapb/lapb_iface.c
83074 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
83075 goto out;
83076
83077 lapb->dev = dev;
83078 - lapb->callbacks = *callbacks;
83079 + lapb->callbacks = callbacks;
83080
83081 __lapb_insert_cb(lapb);
83082
83083 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
83084
83085 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
83086 {
83087 - if (lapb->callbacks.connect_confirmation)
83088 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
83089 + if (lapb->callbacks->connect_confirmation)
83090 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
83091 }
83092
83093 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
83094 {
83095 - if (lapb->callbacks.connect_indication)
83096 - lapb->callbacks.connect_indication(lapb->dev, reason);
83097 + if (lapb->callbacks->connect_indication)
83098 + lapb->callbacks->connect_indication(lapb->dev, reason);
83099 }
83100
83101 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
83102 {
83103 - if (lapb->callbacks.disconnect_confirmation)
83104 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
83105 + if (lapb->callbacks->disconnect_confirmation)
83106 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
83107 }
83108
83109 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
83110 {
83111 - if (lapb->callbacks.disconnect_indication)
83112 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
83113 + if (lapb->callbacks->disconnect_indication)
83114 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
83115 }
83116
83117 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
83118 {
83119 - if (lapb->callbacks.data_indication)
83120 - return lapb->callbacks.data_indication(lapb->dev, skb);
83121 + if (lapb->callbacks->data_indication)
83122 + return lapb->callbacks->data_indication(lapb->dev, skb);
83123
83124 kfree_skb(skb);
83125 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
83126 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
83127 {
83128 int used = 0;
83129
83130 - if (lapb->callbacks.data_transmit) {
83131 - lapb->callbacks.data_transmit(lapb->dev, skb);
83132 + if (lapb->callbacks->data_transmit) {
83133 + lapb->callbacks->data_transmit(lapb->dev, skb);
83134 used = 1;
83135 }
83136
83137 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
83138 index fe2d3f8..e57f683 100644
83139 --- a/net/mac80211/cfg.c
83140 +++ b/net/mac80211/cfg.c
83141 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
83142 return err;
83143 }
83144
83145 -struct cfg80211_ops mac80211_config_ops = {
83146 +const struct cfg80211_ops mac80211_config_ops = {
83147 .add_virtual_intf = ieee80211_add_iface,
83148 .del_virtual_intf = ieee80211_del_iface,
83149 .change_virtual_intf = ieee80211_change_iface,
83150 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
83151 index 7d7879f..2d51f62 100644
83152 --- a/net/mac80211/cfg.h
83153 +++ b/net/mac80211/cfg.h
83154 @@ -4,6 +4,6 @@
83155 #ifndef __CFG_H
83156 #define __CFG_H
83157
83158 -extern struct cfg80211_ops mac80211_config_ops;
83159 +extern const struct cfg80211_ops mac80211_config_ops;
83160
83161 #endif /* __CFG_H */
83162 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
83163 index 99c7525..9cb4937 100644
83164 --- a/net/mac80211/debugfs_key.c
83165 +++ b/net/mac80211/debugfs_key.c
83166 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
83167 size_t count, loff_t *ppos)
83168 {
83169 struct ieee80211_key *key = file->private_data;
83170 - int i, res, bufsize = 2 * key->conf.keylen + 2;
83171 + int i, bufsize = 2 * key->conf.keylen + 2;
83172 char *buf = kmalloc(bufsize, GFP_KERNEL);
83173 char *p = buf;
83174 + ssize_t res;
83175 +
83176 + if (buf == NULL)
83177 + return -ENOMEM;
83178
83179 for (i = 0; i < key->conf.keylen; i++)
83180 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
83181 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
83182 index 33a2e89..08650c8 100644
83183 --- a/net/mac80211/debugfs_sta.c
83184 +++ b/net/mac80211/debugfs_sta.c
83185 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
83186 int i;
83187 struct sta_info *sta = file->private_data;
83188
83189 + pax_track_stack();
83190 +
83191 spin_lock_bh(&sta->lock);
83192 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
83193 sta->ampdu_mlme.dialog_token_allocator + 1);
83194 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
83195 index ca62bfe..6657a03 100644
83196 --- a/net/mac80211/ieee80211_i.h
83197 +++ b/net/mac80211/ieee80211_i.h
83198 @@ -25,6 +25,7 @@
83199 #include <linux/etherdevice.h>
83200 #include <net/cfg80211.h>
83201 #include <net/mac80211.h>
83202 +#include <asm/local.h>
83203 #include "key.h"
83204 #include "sta_info.h"
83205
83206 @@ -635,7 +636,7 @@ struct ieee80211_local {
83207 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
83208 spinlock_t queue_stop_reason_lock;
83209
83210 - int open_count;
83211 + local_t open_count;
83212 int monitors, cooked_mntrs;
83213 /* number of interfaces with corresponding FIF_ flags */
83214 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
83215 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
83216 index 079c500..eb3c6d4 100644
83217 --- a/net/mac80211/iface.c
83218 +++ b/net/mac80211/iface.c
83219 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
83220 break;
83221 }
83222
83223 - if (local->open_count == 0) {
83224 + if (local_read(&local->open_count) == 0) {
83225 res = drv_start(local);
83226 if (res)
83227 goto err_del_bss;
83228 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
83229 * Validate the MAC address for this device.
83230 */
83231 if (!is_valid_ether_addr(dev->dev_addr)) {
83232 - if (!local->open_count)
83233 + if (!local_read(&local->open_count))
83234 drv_stop(local);
83235 return -EADDRNOTAVAIL;
83236 }
83237 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
83238
83239 hw_reconf_flags |= __ieee80211_recalc_idle(local);
83240
83241 - local->open_count++;
83242 + local_inc(&local->open_count);
83243 if (hw_reconf_flags) {
83244 ieee80211_hw_config(local, hw_reconf_flags);
83245 /*
83246 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
83247 err_del_interface:
83248 drv_remove_interface(local, &conf);
83249 err_stop:
83250 - if (!local->open_count)
83251 + if (!local_read(&local->open_count))
83252 drv_stop(local);
83253 err_del_bss:
83254 sdata->bss = NULL;
83255 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
83256 WARN_ON(!list_empty(&sdata->u.ap.vlans));
83257 }
83258
83259 - local->open_count--;
83260 + local_dec(&local->open_count);
83261
83262 switch (sdata->vif.type) {
83263 case NL80211_IFTYPE_AP_VLAN:
83264 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
83265
83266 ieee80211_recalc_ps(local, -1);
83267
83268 - if (local->open_count == 0) {
83269 + if (local_read(&local->open_count) == 0) {
83270 ieee80211_clear_tx_pending(local);
83271 ieee80211_stop_device(local);
83272
83273 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
83274 index 2dfe176..74e4388 100644
83275 --- a/net/mac80211/main.c
83276 +++ b/net/mac80211/main.c
83277 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
83278 local->hw.conf.power_level = power;
83279 }
83280
83281 - if (changed && local->open_count) {
83282 + if (changed && local_read(&local->open_count)) {
83283 ret = drv_config(local, changed);
83284 /*
83285 * Goal:
83286 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
83287 index e67eea7..fcc227e 100644
83288 --- a/net/mac80211/mlme.c
83289 +++ b/net/mac80211/mlme.c
83290 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83291 bool have_higher_than_11mbit = false, newsta = false;
83292 u16 ap_ht_cap_flags;
83293
83294 + pax_track_stack();
83295 +
83296 /*
83297 * AssocResp and ReassocResp have identical structure, so process both
83298 * of them in this function.
83299 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83300 index e535f1c..4d733d1 100644
83301 --- a/net/mac80211/pm.c
83302 +++ b/net/mac80211/pm.c
83303 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83304 }
83305
83306 /* stop hardware - this must stop RX */
83307 - if (local->open_count)
83308 + if (local_read(&local->open_count))
83309 ieee80211_stop_device(local);
83310
83311 local->suspended = true;
83312 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83313 index b33efc4..0a2efb6 100644
83314 --- a/net/mac80211/rate.c
83315 +++ b/net/mac80211/rate.c
83316 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83317 struct rate_control_ref *ref, *old;
83318
83319 ASSERT_RTNL();
83320 - if (local->open_count)
83321 + if (local_read(&local->open_count))
83322 return -EBUSY;
83323
83324 ref = rate_control_alloc(name, local);
83325 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83326 index b1d7904..57e4da7 100644
83327 --- a/net/mac80211/tx.c
83328 +++ b/net/mac80211/tx.c
83329 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83330 return cpu_to_le16(dur);
83331 }
83332
83333 -static int inline is_ieee80211_device(struct ieee80211_local *local,
83334 +static inline int is_ieee80211_device(struct ieee80211_local *local,
83335 struct net_device *dev)
83336 {
83337 return local == wdev_priv(dev->ieee80211_ptr);
83338 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83339 index 31b1085..48fb26d 100644
83340 --- a/net/mac80211/util.c
83341 +++ b/net/mac80211/util.c
83342 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83343 local->resuming = true;
83344
83345 /* restart hardware */
83346 - if (local->open_count) {
83347 + if (local_read(&local->open_count)) {
83348 /*
83349 * Upon resume hardware can sometimes be goofy due to
83350 * various platform / driver / bus issues, so restarting
83351 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83352 index 634d14a..b35a608 100644
83353 --- a/net/netfilter/Kconfig
83354 +++ b/net/netfilter/Kconfig
83355 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83356
83357 To compile it as a module, choose M here. If unsure, say N.
83358
83359 +config NETFILTER_XT_MATCH_GRADM
83360 + tristate '"gradm" match support'
83361 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83362 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83363 + ---help---
83364 + The gradm match allows to match on grsecurity RBAC being enabled.
83365 + It is useful when iptables rules are applied early on bootup to
83366 + prevent connections to the machine (except from a trusted host)
83367 + while the RBAC system is disabled.
83368 +
83369 config NETFILTER_XT_MATCH_HASHLIMIT
83370 tristate '"hashlimit" match support'
83371 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83372 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83373 index 49f62ee..a17b2c6 100644
83374 --- a/net/netfilter/Makefile
83375 +++ b/net/netfilter/Makefile
83376 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83377 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83378 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83379 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83380 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83381 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83382 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83383 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83384 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83385 index 3c7e427..724043c 100644
83386 --- a/net/netfilter/ipvs/ip_vs_app.c
83387 +++ b/net/netfilter/ipvs/ip_vs_app.c
83388 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83389 .open = ip_vs_app_open,
83390 .read = seq_read,
83391 .llseek = seq_lseek,
83392 - .release = seq_release,
83393 + .release = seq_release_net,
83394 };
83395 #endif
83396
83397 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83398 index 95682e5..457dbac 100644
83399 --- a/net/netfilter/ipvs/ip_vs_conn.c
83400 +++ b/net/netfilter/ipvs/ip_vs_conn.c
83401 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83402 /* if the connection is not template and is created
83403 * by sync, preserve the activity flag.
83404 */
83405 - cp->flags |= atomic_read(&dest->conn_flags) &
83406 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83407 (~IP_VS_CONN_F_INACTIVE);
83408 else
83409 - cp->flags |= atomic_read(&dest->conn_flags);
83410 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83411 cp->dest = dest;
83412
83413 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83414 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83415 atomic_set(&cp->refcnt, 1);
83416
83417 atomic_set(&cp->n_control, 0);
83418 - atomic_set(&cp->in_pkts, 0);
83419 + atomic_set_unchecked(&cp->in_pkts, 0);
83420
83421 atomic_inc(&ip_vs_conn_count);
83422 if (flags & IP_VS_CONN_F_NO_CPORT)
83423 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83424 .open = ip_vs_conn_open,
83425 .read = seq_read,
83426 .llseek = seq_lseek,
83427 - .release = seq_release,
83428 + .release = seq_release_net,
83429 };
83430
83431 static const char *ip_vs_origin_name(unsigned flags)
83432 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
83433 .open = ip_vs_conn_sync_open,
83434 .read = seq_read,
83435 .llseek = seq_lseek,
83436 - .release = seq_release,
83437 + .release = seq_release_net,
83438 };
83439
83440 #endif
83441 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
83442
83443 /* Don't drop the entry if its number of incoming packets is not
83444 located in [0, 8] */
83445 - i = atomic_read(&cp->in_pkts);
83446 + i = atomic_read_unchecked(&cp->in_pkts);
83447 if (i > 8 || i < 0) return 0;
83448
83449 if (!todrop_rate[i]) return 0;
83450 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
83451 index b95699f..5fee919 100644
83452 --- a/net/netfilter/ipvs/ip_vs_core.c
83453 +++ b/net/netfilter/ipvs/ip_vs_core.c
83454 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
83455 ret = cp->packet_xmit(skb, cp, pp);
83456 /* do not touch skb anymore */
83457
83458 - atomic_inc(&cp->in_pkts);
83459 + atomic_inc_unchecked(&cp->in_pkts);
83460 ip_vs_conn_put(cp);
83461 return ret;
83462 }
83463 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
83464 * Sync connection if it is about to close to
83465 * encorage the standby servers to update the connections timeout
83466 */
83467 - pkts = atomic_add_return(1, &cp->in_pkts);
83468 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
83469 if (af == AF_INET &&
83470 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
83471 (((cp->protocol != IPPROTO_TCP ||
83472 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
83473 index 02b2610..2d89424 100644
83474 --- a/net/netfilter/ipvs/ip_vs_ctl.c
83475 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
83476 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
83477 ip_vs_rs_hash(dest);
83478 write_unlock_bh(&__ip_vs_rs_lock);
83479 }
83480 - atomic_set(&dest->conn_flags, conn_flags);
83481 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
83482
83483 /* bind the service */
83484 if (!dest->svc) {
83485 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83486 " %-7s %-6d %-10d %-10d\n",
83487 &dest->addr.in6,
83488 ntohs(dest->port),
83489 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83490 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83491 atomic_read(&dest->weight),
83492 atomic_read(&dest->activeconns),
83493 atomic_read(&dest->inactconns));
83494 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83495 "%-7s %-6d %-10d %-10d\n",
83496 ntohl(dest->addr.ip),
83497 ntohs(dest->port),
83498 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83499 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83500 atomic_read(&dest->weight),
83501 atomic_read(&dest->activeconns),
83502 atomic_read(&dest->inactconns));
83503 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
83504 .open = ip_vs_info_open,
83505 .read = seq_read,
83506 .llseek = seq_lseek,
83507 - .release = seq_release_private,
83508 + .release = seq_release_net,
83509 };
83510
83511 #endif
83512 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
83513 .open = ip_vs_stats_seq_open,
83514 .read = seq_read,
83515 .llseek = seq_lseek,
83516 - .release = single_release,
83517 + .release = single_release_net,
83518 };
83519
83520 #endif
83521 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
83522
83523 entry.addr = dest->addr.ip;
83524 entry.port = dest->port;
83525 - entry.conn_flags = atomic_read(&dest->conn_flags);
83526 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
83527 entry.weight = atomic_read(&dest->weight);
83528 entry.u_threshold = dest->u_threshold;
83529 entry.l_threshold = dest->l_threshold;
83530 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83531 unsigned char arg[128];
83532 int ret = 0;
83533
83534 + pax_track_stack();
83535 +
83536 if (!capable(CAP_NET_ADMIN))
83537 return -EPERM;
83538
83539 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
83540 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
83541
83542 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
83543 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83544 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83545 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
83546 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
83547 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
83548 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
83549 index e177f0d..55e8581 100644
83550 --- a/net/netfilter/ipvs/ip_vs_sync.c
83551 +++ b/net/netfilter/ipvs/ip_vs_sync.c
83552 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
83553
83554 if (opt)
83555 memcpy(&cp->in_seq, opt, sizeof(*opt));
83556 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83557 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83558 cp->state = state;
83559 cp->old_state = cp->state;
83560 /*
83561 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
83562 index 30b3189..e2e4b55 100644
83563 --- a/net/netfilter/ipvs/ip_vs_xmit.c
83564 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
83565 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
83566 else
83567 rc = NF_ACCEPT;
83568 /* do not touch skb anymore */
83569 - atomic_inc(&cp->in_pkts);
83570 + atomic_inc_unchecked(&cp->in_pkts);
83571 goto out;
83572 }
83573
83574 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
83575 else
83576 rc = NF_ACCEPT;
83577 /* do not touch skb anymore */
83578 - atomic_inc(&cp->in_pkts);
83579 + atomic_inc_unchecked(&cp->in_pkts);
83580 goto out;
83581 }
83582
83583 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
83584 index d521718..d0fd7a1 100644
83585 --- a/net/netfilter/nf_conntrack_netlink.c
83586 +++ b/net/netfilter/nf_conntrack_netlink.c
83587 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
83588 static int
83589 ctnetlink_parse_tuple(const struct nlattr * const cda[],
83590 struct nf_conntrack_tuple *tuple,
83591 - enum ctattr_tuple type, u_int8_t l3num)
83592 + enum ctattr_type type, u_int8_t l3num)
83593 {
83594 struct nlattr *tb[CTA_TUPLE_MAX+1];
83595 int err;
83596 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
83597 index f900dc3..5e45346 100644
83598 --- a/net/netfilter/nfnetlink_log.c
83599 +++ b/net/netfilter/nfnetlink_log.c
83600 @@ -68,7 +68,7 @@ struct nfulnl_instance {
83601 };
83602
83603 static DEFINE_RWLOCK(instances_lock);
83604 -static atomic_t global_seq;
83605 +static atomic_unchecked_t global_seq;
83606
83607 #define INSTANCE_BUCKETS 16
83608 static struct hlist_head instance_table[INSTANCE_BUCKETS];
83609 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
83610 /* global sequence number */
83611 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
83612 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
83613 - htonl(atomic_inc_return(&global_seq)));
83614 + htonl(atomic_inc_return_unchecked(&global_seq)));
83615
83616 if (data_len) {
83617 struct nlattr *nla;
83618 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
83619 new file mode 100644
83620 index 0000000..b1bac76
83621 --- /dev/null
83622 +++ b/net/netfilter/xt_gradm.c
83623 @@ -0,0 +1,51 @@
83624 +/*
83625 + * gradm match for netfilter
83626 + * Copyright © Zbigniew Krzystolik, 2010
83627 + *
83628 + * This program is free software; you can redistribute it and/or modify
83629 + * it under the terms of the GNU General Public License; either version
83630 + * 2 or 3 as published by the Free Software Foundation.
83631 + */
83632 +#include <linux/module.h>
83633 +#include <linux/moduleparam.h>
83634 +#include <linux/skbuff.h>
83635 +#include <linux/netfilter/x_tables.h>
83636 +#include <linux/grsecurity.h>
83637 +#include <linux/netfilter/xt_gradm.h>
83638 +
83639 +static bool
83640 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
83641 +{
83642 + const struct xt_gradm_mtinfo *info = par->matchinfo;
83643 + bool retval = false;
83644 + if (gr_acl_is_enabled())
83645 + retval = true;
83646 + return retval ^ info->invflags;
83647 +}
83648 +
83649 +static struct xt_match gradm_mt_reg __read_mostly = {
83650 + .name = "gradm",
83651 + .revision = 0,
83652 + .family = NFPROTO_UNSPEC,
83653 + .match = gradm_mt,
83654 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
83655 + .me = THIS_MODULE,
83656 +};
83657 +
83658 +static int __init gradm_mt_init(void)
83659 +{
83660 + return xt_register_match(&gradm_mt_reg);
83661 +}
83662 +
83663 +static void __exit gradm_mt_exit(void)
83664 +{
83665 + xt_unregister_match(&gradm_mt_reg);
83666 +}
83667 +
83668 +module_init(gradm_mt_init);
83669 +module_exit(gradm_mt_exit);
83670 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
83671 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
83672 +MODULE_LICENSE("GPL");
83673 +MODULE_ALIAS("ipt_gradm");
83674 +MODULE_ALIAS("ip6t_gradm");
83675 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
83676 index 5a7dcdf..24a3578 100644
83677 --- a/net/netlink/af_netlink.c
83678 +++ b/net/netlink/af_netlink.c
83679 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
83680 sk->sk_error_report(sk);
83681 }
83682 }
83683 - atomic_inc(&sk->sk_drops);
83684 + atomic_inc_unchecked(&sk->sk_drops);
83685 }
83686
83687 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
83688 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
83689 struct netlink_sock *nlk = nlk_sk(s);
83690
83691 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
83692 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83693 + NULL,
83694 +#else
83695 s,
83696 +#endif
83697 s->sk_protocol,
83698 nlk->pid,
83699 nlk->groups ? (u32)nlk->groups[0] : 0,
83700 sk_rmem_alloc_get(s),
83701 sk_wmem_alloc_get(s),
83702 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83703 + NULL,
83704 +#else
83705 nlk->cb,
83706 +#endif
83707 atomic_read(&s->sk_refcnt),
83708 - atomic_read(&s->sk_drops)
83709 + atomic_read_unchecked(&s->sk_drops)
83710 );
83711
83712 }
83713 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
83714 index 7a83495..ab0062f 100644
83715 --- a/net/netrom/af_netrom.c
83716 +++ b/net/netrom/af_netrom.c
83717 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83718 struct sock *sk = sock->sk;
83719 struct nr_sock *nr = nr_sk(sk);
83720
83721 + memset(sax, 0, sizeof(*sax));
83722 lock_sock(sk);
83723 if (peer != 0) {
83724 if (sk->sk_state != TCP_ESTABLISHED) {
83725 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83726 *uaddr_len = sizeof(struct full_sockaddr_ax25);
83727 } else {
83728 sax->fsa_ax25.sax25_family = AF_NETROM;
83729 - sax->fsa_ax25.sax25_ndigis = 0;
83730 sax->fsa_ax25.sax25_call = nr->source_addr;
83731 *uaddr_len = sizeof(struct sockaddr_ax25);
83732 }
83733 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
83734 index 35cfa79..4e78ff7 100644
83735 --- a/net/packet/af_packet.c
83736 +++ b/net/packet/af_packet.c
83737 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
83738
83739 seq_printf(seq,
83740 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
83741 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83742 + NULL,
83743 +#else
83744 s,
83745 +#endif
83746 atomic_read(&s->sk_refcnt),
83747 s->sk_type,
83748 ntohs(po->num),
83749 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
83750 index 519ff9d..a422a90 100644
83751 --- a/net/phonet/af_phonet.c
83752 +++ b/net/phonet/af_phonet.c
83753 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
83754 {
83755 struct phonet_protocol *pp;
83756
83757 - if (protocol >= PHONET_NPROTO)
83758 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83759 return NULL;
83760
83761 spin_lock(&proto_tab_lock);
83762 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
83763 {
83764 int err = 0;
83765
83766 - if (protocol >= PHONET_NPROTO)
83767 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83768 return -EINVAL;
83769
83770 err = proto_register(pp->prot, 1);
83771 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
83772 index ef5c75c..2b6c2fa 100644
83773 --- a/net/phonet/datagram.c
83774 +++ b/net/phonet/datagram.c
83775 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
83776 if (err < 0) {
83777 kfree_skb(skb);
83778 if (err == -ENOMEM)
83779 - atomic_inc(&sk->sk_drops);
83780 + atomic_inc_unchecked(&sk->sk_drops);
83781 }
83782 return err ? NET_RX_DROP : NET_RX_SUCCESS;
83783 }
83784 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
83785 index 9cdd35e..16cd850 100644
83786 --- a/net/phonet/pep.c
83787 +++ b/net/phonet/pep.c
83788 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83789
83790 case PNS_PEP_CTRL_REQ:
83791 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
83792 - atomic_inc(&sk->sk_drops);
83793 + atomic_inc_unchecked(&sk->sk_drops);
83794 break;
83795 }
83796 __skb_pull(skb, 4);
83797 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83798 if (!err)
83799 return 0;
83800 if (err == -ENOMEM)
83801 - atomic_inc(&sk->sk_drops);
83802 + atomic_inc_unchecked(&sk->sk_drops);
83803 break;
83804 }
83805
83806 if (pn->rx_credits == 0) {
83807 - atomic_inc(&sk->sk_drops);
83808 + atomic_inc_unchecked(&sk->sk_drops);
83809 err = -ENOBUFS;
83810 break;
83811 }
83812 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
83813 index aa5b5a9..c09b4f8 100644
83814 --- a/net/phonet/socket.c
83815 +++ b/net/phonet/socket.c
83816 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
83817 sk->sk_state,
83818 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
83819 sock_i_uid(sk), sock_i_ino(sk),
83820 - atomic_read(&sk->sk_refcnt), sk,
83821 - atomic_read(&sk->sk_drops), &len);
83822 + atomic_read(&sk->sk_refcnt),
83823 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83824 + NULL,
83825 +#else
83826 + sk,
83827 +#endif
83828 + atomic_read_unchecked(&sk->sk_drops), &len);
83829 }
83830 seq_printf(seq, "%*s\n", 127 - len, "");
83831 return 0;
83832 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
83833 index ec753b3..821187c 100644
83834 --- a/net/rds/Kconfig
83835 +++ b/net/rds/Kconfig
83836 @@ -1,7 +1,7 @@
83837
83838 config RDS
83839 tristate "The RDS Protocol (EXPERIMENTAL)"
83840 - depends on INET && EXPERIMENTAL
83841 + depends on INET && EXPERIMENTAL && BROKEN
83842 ---help---
83843 The RDS (Reliable Datagram Sockets) protocol provides reliable,
83844 sequenced delivery of datagrams over Infiniband, iWARP,
83845 diff --git a/net/rds/cong.c b/net/rds/cong.c
83846 index dd2711d..1c7ed12 100644
83847 --- a/net/rds/cong.c
83848 +++ b/net/rds/cong.c
83849 @@ -77,7 +77,7 @@
83850 * finds that the saved generation number is smaller than the global generation
83851 * number, it wakes up the process.
83852 */
83853 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
83854 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
83855
83856 /*
83857 * Congestion monitoring
83858 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
83859 rdsdebug("waking map %p for %pI4\n",
83860 map, &map->m_addr);
83861 rds_stats_inc(s_cong_update_received);
83862 - atomic_inc(&rds_cong_generation);
83863 + atomic_inc_unchecked(&rds_cong_generation);
83864 if (waitqueue_active(&map->m_waitq))
83865 wake_up(&map->m_waitq);
83866 if (waitqueue_active(&rds_poll_waitq))
83867 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
83868
83869 int rds_cong_updated_since(unsigned long *recent)
83870 {
83871 - unsigned long gen = atomic_read(&rds_cong_generation);
83872 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
83873
83874 if (likely(*recent == gen))
83875 return 0;
83876 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
83877 index de4a1b1..94ec861 100644
83878 --- a/net/rds/iw_rdma.c
83879 +++ b/net/rds/iw_rdma.c
83880 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
83881 struct rdma_cm_id *pcm_id;
83882 int rc;
83883
83884 + pax_track_stack();
83885 +
83886 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
83887 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
83888
83889 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
83890 index b5198ae..8b9fb90 100644
83891 --- a/net/rds/tcp.c
83892 +++ b/net/rds/tcp.c
83893 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
83894 int val = 1;
83895
83896 set_fs(KERNEL_DS);
83897 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
83898 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
83899 sizeof(val));
83900 set_fs(oldfs);
83901 }
83902 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
83903 index ab545e0..4079b3b 100644
83904 --- a/net/rds/tcp_send.c
83905 +++ b/net/rds/tcp_send.c
83906 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
83907
83908 oldfs = get_fs();
83909 set_fs(KERNEL_DS);
83910 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
83911 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
83912 sizeof(val));
83913 set_fs(oldfs);
83914 }
83915 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
83916 index a86afce..8657bce 100644
83917 --- a/net/rxrpc/af_rxrpc.c
83918 +++ b/net/rxrpc/af_rxrpc.c
83919 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
83920 __be32 rxrpc_epoch;
83921
83922 /* current debugging ID */
83923 -atomic_t rxrpc_debug_id;
83924 +atomic_unchecked_t rxrpc_debug_id;
83925
83926 /* count of skbs currently in use */
83927 atomic_t rxrpc_n_skbs;
83928 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
83929 index b4a2209..539106c 100644
83930 --- a/net/rxrpc/ar-ack.c
83931 +++ b/net/rxrpc/ar-ack.c
83932 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83933
83934 _enter("{%d,%d,%d,%d},",
83935 call->acks_hard, call->acks_unacked,
83936 - atomic_read(&call->sequence),
83937 + atomic_read_unchecked(&call->sequence),
83938 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
83939
83940 stop = 0;
83941 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83942
83943 /* each Tx packet has a new serial number */
83944 sp->hdr.serial =
83945 - htonl(atomic_inc_return(&call->conn->serial));
83946 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
83947
83948 hdr = (struct rxrpc_header *) txb->head;
83949 hdr->serial = sp->hdr.serial;
83950 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
83951 */
83952 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
83953 {
83954 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
83955 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
83956 }
83957
83958 /*
83959 @@ -627,7 +627,7 @@ process_further:
83960
83961 latest = ntohl(sp->hdr.serial);
83962 hard = ntohl(ack.firstPacket);
83963 - tx = atomic_read(&call->sequence);
83964 + tx = atomic_read_unchecked(&call->sequence);
83965
83966 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83967 latest,
83968 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
83969 u32 abort_code = RX_PROTOCOL_ERROR;
83970 u8 *acks = NULL;
83971
83972 + pax_track_stack();
83973 +
83974 //printk("\n--------------------\n");
83975 _enter("{%d,%s,%lx} [%lu]",
83976 call->debug_id, rxrpc_call_states[call->state], call->events,
83977 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
83978 goto maybe_reschedule;
83979
83980 send_ACK_with_skew:
83981 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
83982 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
83983 ntohl(ack.serial));
83984 send_ACK:
83985 mtu = call->conn->trans->peer->if_mtu;
83986 @@ -1171,7 +1173,7 @@ send_ACK:
83987 ackinfo.rxMTU = htonl(5692);
83988 ackinfo.jumbo_max = htonl(4);
83989
83990 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83991 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83992 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83993 ntohl(hdr.serial),
83994 ntohs(ack.maxSkew),
83995 @@ -1189,7 +1191,7 @@ send_ACK:
83996 send_message:
83997 _debug("send message");
83998
83999 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
84000 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
84001 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
84002 send_message_2:
84003
84004 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
84005 index bc0019f..e1b4b24 100644
84006 --- a/net/rxrpc/ar-call.c
84007 +++ b/net/rxrpc/ar-call.c
84008 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
84009 spin_lock_init(&call->lock);
84010 rwlock_init(&call->state_lock);
84011 atomic_set(&call->usage, 1);
84012 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
84013 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84014 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
84015
84016 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
84017 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
84018 index 9f1ce84..ff8d061 100644
84019 --- a/net/rxrpc/ar-connection.c
84020 +++ b/net/rxrpc/ar-connection.c
84021 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
84022 rwlock_init(&conn->lock);
84023 spin_lock_init(&conn->state_lock);
84024 atomic_set(&conn->usage, 1);
84025 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
84026 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84027 conn->avail_calls = RXRPC_MAXCALLS;
84028 conn->size_align = 4;
84029 conn->header_size = sizeof(struct rxrpc_header);
84030 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
84031 index 0505cdc..f0748ce 100644
84032 --- a/net/rxrpc/ar-connevent.c
84033 +++ b/net/rxrpc/ar-connevent.c
84034 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
84035
84036 len = iov[0].iov_len + iov[1].iov_len;
84037
84038 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
84039 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84040 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
84041
84042 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
84043 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
84044 index f98c802..9e8488e 100644
84045 --- a/net/rxrpc/ar-input.c
84046 +++ b/net/rxrpc/ar-input.c
84047 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
84048 /* track the latest serial number on this connection for ACK packet
84049 * information */
84050 serial = ntohl(sp->hdr.serial);
84051 - hi_serial = atomic_read(&call->conn->hi_serial);
84052 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
84053 while (serial > hi_serial)
84054 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
84055 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
84056 serial);
84057
84058 /* request ACK generation for any ACK or DATA packet that requests
84059 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
84060 index 7043b29..06edcdf 100644
84061 --- a/net/rxrpc/ar-internal.h
84062 +++ b/net/rxrpc/ar-internal.h
84063 @@ -272,8 +272,8 @@ struct rxrpc_connection {
84064 int error; /* error code for local abort */
84065 int debug_id; /* debug ID for printks */
84066 unsigned call_counter; /* call ID counter */
84067 - atomic_t serial; /* packet serial number counter */
84068 - atomic_t hi_serial; /* highest serial number received */
84069 + atomic_unchecked_t serial; /* packet serial number counter */
84070 + atomic_unchecked_t hi_serial; /* highest serial number received */
84071 u8 avail_calls; /* number of calls available */
84072 u8 size_align; /* data size alignment (for security) */
84073 u8 header_size; /* rxrpc + security header size */
84074 @@ -346,7 +346,7 @@ struct rxrpc_call {
84075 spinlock_t lock;
84076 rwlock_t state_lock; /* lock for state transition */
84077 atomic_t usage;
84078 - atomic_t sequence; /* Tx data packet sequence counter */
84079 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
84080 u32 abort_code; /* local/remote abort code */
84081 enum { /* current state of call */
84082 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
84083 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
84084 */
84085 extern atomic_t rxrpc_n_skbs;
84086 extern __be32 rxrpc_epoch;
84087 -extern atomic_t rxrpc_debug_id;
84088 +extern atomic_unchecked_t rxrpc_debug_id;
84089 extern struct workqueue_struct *rxrpc_workqueue;
84090
84091 /*
84092 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
84093 index 74697b2..10f9b77 100644
84094 --- a/net/rxrpc/ar-key.c
84095 +++ b/net/rxrpc/ar-key.c
84096 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
84097 return ret;
84098
84099 plen -= sizeof(*token);
84100 - token = kmalloc(sizeof(*token), GFP_KERNEL);
84101 + token = kzalloc(sizeof(*token), GFP_KERNEL);
84102 if (!token)
84103 return -ENOMEM;
84104
84105 - token->kad = kmalloc(plen, GFP_KERNEL);
84106 + token->kad = kzalloc(plen, GFP_KERNEL);
84107 if (!token->kad) {
84108 kfree(token);
84109 return -ENOMEM;
84110 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
84111 goto error;
84112
84113 ret = -ENOMEM;
84114 - token = kmalloc(sizeof(*token), GFP_KERNEL);
84115 + token = kzalloc(sizeof(*token), GFP_KERNEL);
84116 if (!token)
84117 goto error;
84118 - token->kad = kmalloc(plen, GFP_KERNEL);
84119 + token->kad = kzalloc(plen, GFP_KERNEL);
84120 if (!token->kad)
84121 goto error_free;
84122
84123 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
84124 index 807535f..5b7f19e 100644
84125 --- a/net/rxrpc/ar-local.c
84126 +++ b/net/rxrpc/ar-local.c
84127 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
84128 spin_lock_init(&local->lock);
84129 rwlock_init(&local->services_lock);
84130 atomic_set(&local->usage, 1);
84131 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
84132 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84133 memcpy(&local->srx, srx, sizeof(*srx));
84134 }
84135
84136 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
84137 index cc9102c..7d3888e 100644
84138 --- a/net/rxrpc/ar-output.c
84139 +++ b/net/rxrpc/ar-output.c
84140 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
84141 sp->hdr.cid = call->cid;
84142 sp->hdr.callNumber = call->call_id;
84143 sp->hdr.seq =
84144 - htonl(atomic_inc_return(&call->sequence));
84145 + htonl(atomic_inc_return_unchecked(&call->sequence));
84146 sp->hdr.serial =
84147 - htonl(atomic_inc_return(&conn->serial));
84148 + htonl(atomic_inc_return_unchecked(&conn->serial));
84149 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
84150 sp->hdr.userStatus = 0;
84151 sp->hdr.securityIndex = conn->security_ix;
84152 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
84153 index edc026c..4bd4e2d 100644
84154 --- a/net/rxrpc/ar-peer.c
84155 +++ b/net/rxrpc/ar-peer.c
84156 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
84157 INIT_LIST_HEAD(&peer->error_targets);
84158 spin_lock_init(&peer->lock);
84159 atomic_set(&peer->usage, 1);
84160 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
84161 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84162 memcpy(&peer->srx, srx, sizeof(*srx));
84163
84164 rxrpc_assess_MTU_size(peer);
84165 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
84166 index 38047f7..9f48511 100644
84167 --- a/net/rxrpc/ar-proc.c
84168 +++ b/net/rxrpc/ar-proc.c
84169 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
84170 atomic_read(&conn->usage),
84171 rxrpc_conn_states[conn->state],
84172 key_serial(conn->key),
84173 - atomic_read(&conn->serial),
84174 - atomic_read(&conn->hi_serial));
84175 + atomic_read_unchecked(&conn->serial),
84176 + atomic_read_unchecked(&conn->hi_serial));
84177
84178 return 0;
84179 }
84180 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
84181 index 0936e1a..437c640 100644
84182 --- a/net/rxrpc/ar-transport.c
84183 +++ b/net/rxrpc/ar-transport.c
84184 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
84185 spin_lock_init(&trans->client_lock);
84186 rwlock_init(&trans->conn_lock);
84187 atomic_set(&trans->usage, 1);
84188 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
84189 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84190
84191 if (peer->srx.transport.family == AF_INET) {
84192 switch (peer->srx.transport_type) {
84193 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
84194 index 713ac59..306f6ae 100644
84195 --- a/net/rxrpc/rxkad.c
84196 +++ b/net/rxrpc/rxkad.c
84197 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
84198 u16 check;
84199 int nsg;
84200
84201 + pax_track_stack();
84202 +
84203 sp = rxrpc_skb(skb);
84204
84205 _enter("");
84206 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
84207 u16 check;
84208 int nsg;
84209
84210 + pax_track_stack();
84211 +
84212 _enter("");
84213
84214 sp = rxrpc_skb(skb);
84215 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
84216
84217 len = iov[0].iov_len + iov[1].iov_len;
84218
84219 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
84220 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84221 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
84222
84223 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
84224 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
84225
84226 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
84227
84228 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
84229 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84230 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
84231
84232 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
84233 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
84234 index 914c419..7a16d2c 100644
84235 --- a/net/sctp/auth.c
84236 +++ b/net/sctp/auth.c
84237 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
84238 struct sctp_auth_bytes *key;
84239
84240 /* Verify that we are not going to overflow INT_MAX */
84241 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
84242 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
84243 return NULL;
84244
84245 /* Allocate the shared key */
84246 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
84247 index d093cbf..9fc36fc 100644
84248 --- a/net/sctp/proc.c
84249 +++ b/net/sctp/proc.c
84250 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
84251 sctp_for_each_hentry(epb, node, &head->chain) {
84252 ep = sctp_ep(epb);
84253 sk = epb->sk;
84254 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
84255 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
84256 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84257 + NULL, NULL,
84258 +#else
84259 + ep, sk,
84260 +#endif
84261 sctp_sk(sk)->type, sk->sk_state, hash,
84262 epb->bind_addr.port,
84263 sock_i_uid(sk), sock_i_ino(sk));
84264 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
84265 seq_printf(seq,
84266 "%8p %8p %-3d %-3d %-2d %-4d "
84267 "%4d %8d %8d %7d %5lu %-5d %5d ",
84268 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
84269 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84270 + NULL, NULL,
84271 +#else
84272 + assoc, sk,
84273 +#endif
84274 + sctp_sk(sk)->type, sk->sk_state,
84275 assoc->state, hash,
84276 assoc->assoc_id,
84277 assoc->sndbuf_used,
84278 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
84279 index 3a95fcb..c40fc1d 100644
84280 --- a/net/sctp/socket.c
84281 +++ b/net/sctp/socket.c
84282 @@ -5802,7 +5802,6 @@ pp_found:
84283 */
84284 int reuse = sk->sk_reuse;
84285 struct sock *sk2;
84286 - struct hlist_node *node;
84287
84288 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
84289 if (pp->fastreuse && sk->sk_reuse &&
84290 diff --git a/net/socket.c b/net/socket.c
84291 index d449812..4ac08d3c 100644
84292 --- a/net/socket.c
84293 +++ b/net/socket.c
84294 @@ -87,6 +87,7 @@
84295 #include <linux/wireless.h>
84296 #include <linux/nsproxy.h>
84297 #include <linux/magic.h>
84298 +#include <linux/in.h>
84299
84300 #include <asm/uaccess.h>
84301 #include <asm/unistd.h>
84302 @@ -97,6 +98,21 @@
84303 #include <net/sock.h>
84304 #include <linux/netfilter.h>
84305
84306 +extern void gr_attach_curr_ip(const struct sock *sk);
84307 +extern int gr_handle_sock_all(const int family, const int type,
84308 + const int protocol);
84309 +extern int gr_handle_sock_server(const struct sockaddr *sck);
84310 +extern int gr_handle_sock_server_other(const struct sock *sck);
84311 +extern int gr_handle_sock_client(const struct sockaddr *sck);
84312 +extern int gr_search_connect(struct socket * sock,
84313 + struct sockaddr_in * addr);
84314 +extern int gr_search_bind(struct socket * sock,
84315 + struct sockaddr_in * addr);
84316 +extern int gr_search_listen(struct socket * sock);
84317 +extern int gr_search_accept(struct socket * sock);
84318 +extern int gr_search_socket(const int domain, const int type,
84319 + const int protocol);
84320 +
84321 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
84322 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
84323 unsigned long nr_segs, loff_t pos);
84324 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
84325 mnt);
84326 }
84327
84328 -static struct vfsmount *sock_mnt __read_mostly;
84329 +struct vfsmount *sock_mnt __read_mostly;
84330
84331 static struct file_system_type sock_fs_type = {
84332 .name = "sockfs",
84333 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
84334 return -EAFNOSUPPORT;
84335 if (type < 0 || type >= SOCK_MAX)
84336 return -EINVAL;
84337 + if (protocol < 0)
84338 + return -EINVAL;
84339
84340 /* Compatibility.
84341
84342 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
84343 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
84344 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
84345
84346 + if(!gr_search_socket(family, type, protocol)) {
84347 + retval = -EACCES;
84348 + goto out;
84349 + }
84350 +
84351 + if (gr_handle_sock_all(family, type, protocol)) {
84352 + retval = -EACCES;
84353 + goto out;
84354 + }
84355 +
84356 retval = sock_create(family, type, protocol, &sock);
84357 if (retval < 0)
84358 goto out;
84359 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84360 if (sock) {
84361 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
84362 if (err >= 0) {
84363 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
84364 + err = -EACCES;
84365 + goto error;
84366 + }
84367 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
84368 + if (err)
84369 + goto error;
84370 +
84371 err = security_socket_bind(sock,
84372 (struct sockaddr *)&address,
84373 addrlen);
84374 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84375 (struct sockaddr *)
84376 &address, addrlen);
84377 }
84378 +error:
84379 fput_light(sock->file, fput_needed);
84380 }
84381 return err;
84382 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
84383 if ((unsigned)backlog > somaxconn)
84384 backlog = somaxconn;
84385
84386 + if (gr_handle_sock_server_other(sock->sk)) {
84387 + err = -EPERM;
84388 + goto error;
84389 + }
84390 +
84391 + err = gr_search_listen(sock);
84392 + if (err)
84393 + goto error;
84394 +
84395 err = security_socket_listen(sock, backlog);
84396 if (!err)
84397 err = sock->ops->listen(sock, backlog);
84398
84399 +error:
84400 fput_light(sock->file, fput_needed);
84401 }
84402 return err;
84403 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84404 newsock->type = sock->type;
84405 newsock->ops = sock->ops;
84406
84407 + if (gr_handle_sock_server_other(sock->sk)) {
84408 + err = -EPERM;
84409 + sock_release(newsock);
84410 + goto out_put;
84411 + }
84412 +
84413 + err = gr_search_accept(sock);
84414 + if (err) {
84415 + sock_release(newsock);
84416 + goto out_put;
84417 + }
84418 +
84419 /*
84420 * We don't need try_module_get here, as the listening socket (sock)
84421 * has the protocol module (sock->ops->owner) held.
84422 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84423 fd_install(newfd, newfile);
84424 err = newfd;
84425
84426 + gr_attach_curr_ip(newsock->sk);
84427 +
84428 out_put:
84429 fput_light(sock->file, fput_needed);
84430 out:
84431 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84432 int, addrlen)
84433 {
84434 struct socket *sock;
84435 + struct sockaddr *sck;
84436 struct sockaddr_storage address;
84437 int err, fput_needed;
84438
84439 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84440 if (err < 0)
84441 goto out_put;
84442
84443 + sck = (struct sockaddr *)&address;
84444 +
84445 + if (gr_handle_sock_client(sck)) {
84446 + err = -EACCES;
84447 + goto out_put;
84448 + }
84449 +
84450 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
84451 + if (err)
84452 + goto out_put;
84453 +
84454 err =
84455 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
84456 if (err)
84457 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
84458 int err, ctl_len, iov_size, total_len;
84459 int fput_needed;
84460
84461 + pax_track_stack();
84462 +
84463 err = -EFAULT;
84464 if (MSG_CMSG_COMPAT & flags) {
84465 if (get_compat_msghdr(&msg_sys, msg_compat))
84466 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
84467 * kernel msghdr to use the kernel address space)
84468 */
84469
84470 - uaddr = (__force void __user *)msg_sys.msg_name;
84471 + uaddr = (void __force_user *)msg_sys.msg_name;
84472 uaddr_len = COMPAT_NAMELEN(msg);
84473 if (MSG_CMSG_COMPAT & flags) {
84474 err = verify_compat_iovec(&msg_sys, iov,
84475 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
84476 index ac94477..8afe5c3 100644
84477 --- a/net/sunrpc/sched.c
84478 +++ b/net/sunrpc/sched.c
84479 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
84480 #ifdef RPC_DEBUG
84481 static void rpc_task_set_debuginfo(struct rpc_task *task)
84482 {
84483 - static atomic_t rpc_pid;
84484 + static atomic_unchecked_t rpc_pid;
84485
84486 task->tk_magic = RPC_TASK_MAGIC_ID;
84487 - task->tk_pid = atomic_inc_return(&rpc_pid);
84488 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
84489 }
84490 #else
84491 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
84492 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
84493 index 35fb68b..236a8bf 100644
84494 --- a/net/sunrpc/xprtrdma/svc_rdma.c
84495 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
84496 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
84497 static unsigned int min_max_inline = 4096;
84498 static unsigned int max_max_inline = 65536;
84499
84500 -atomic_t rdma_stat_recv;
84501 -atomic_t rdma_stat_read;
84502 -atomic_t rdma_stat_write;
84503 -atomic_t rdma_stat_sq_starve;
84504 -atomic_t rdma_stat_rq_starve;
84505 -atomic_t rdma_stat_rq_poll;
84506 -atomic_t rdma_stat_rq_prod;
84507 -atomic_t rdma_stat_sq_poll;
84508 -atomic_t rdma_stat_sq_prod;
84509 +atomic_unchecked_t rdma_stat_recv;
84510 +atomic_unchecked_t rdma_stat_read;
84511 +atomic_unchecked_t rdma_stat_write;
84512 +atomic_unchecked_t rdma_stat_sq_starve;
84513 +atomic_unchecked_t rdma_stat_rq_starve;
84514 +atomic_unchecked_t rdma_stat_rq_poll;
84515 +atomic_unchecked_t rdma_stat_rq_prod;
84516 +atomic_unchecked_t rdma_stat_sq_poll;
84517 +atomic_unchecked_t rdma_stat_sq_prod;
84518
84519 /* Temporary NFS request map and context caches */
84520 struct kmem_cache *svc_rdma_map_cachep;
84521 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
84522 len -= *ppos;
84523 if (len > *lenp)
84524 len = *lenp;
84525 - if (len && copy_to_user(buffer, str_buf, len))
84526 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
84527 return -EFAULT;
84528 *lenp = len;
84529 *ppos += len;
84530 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
84531 {
84532 .procname = "rdma_stat_read",
84533 .data = &rdma_stat_read,
84534 - .maxlen = sizeof(atomic_t),
84535 + .maxlen = sizeof(atomic_unchecked_t),
84536 .mode = 0644,
84537 .proc_handler = &read_reset_stat,
84538 },
84539 {
84540 .procname = "rdma_stat_recv",
84541 .data = &rdma_stat_recv,
84542 - .maxlen = sizeof(atomic_t),
84543 + .maxlen = sizeof(atomic_unchecked_t),
84544 .mode = 0644,
84545 .proc_handler = &read_reset_stat,
84546 },
84547 {
84548 .procname = "rdma_stat_write",
84549 .data = &rdma_stat_write,
84550 - .maxlen = sizeof(atomic_t),
84551 + .maxlen = sizeof(atomic_unchecked_t),
84552 .mode = 0644,
84553 .proc_handler = &read_reset_stat,
84554 },
84555 {
84556 .procname = "rdma_stat_sq_starve",
84557 .data = &rdma_stat_sq_starve,
84558 - .maxlen = sizeof(atomic_t),
84559 + .maxlen = sizeof(atomic_unchecked_t),
84560 .mode = 0644,
84561 .proc_handler = &read_reset_stat,
84562 },
84563 {
84564 .procname = "rdma_stat_rq_starve",
84565 .data = &rdma_stat_rq_starve,
84566 - .maxlen = sizeof(atomic_t),
84567 + .maxlen = sizeof(atomic_unchecked_t),
84568 .mode = 0644,
84569 .proc_handler = &read_reset_stat,
84570 },
84571 {
84572 .procname = "rdma_stat_rq_poll",
84573 .data = &rdma_stat_rq_poll,
84574 - .maxlen = sizeof(atomic_t),
84575 + .maxlen = sizeof(atomic_unchecked_t),
84576 .mode = 0644,
84577 .proc_handler = &read_reset_stat,
84578 },
84579 {
84580 .procname = "rdma_stat_rq_prod",
84581 .data = &rdma_stat_rq_prod,
84582 - .maxlen = sizeof(atomic_t),
84583 + .maxlen = sizeof(atomic_unchecked_t),
84584 .mode = 0644,
84585 .proc_handler = &read_reset_stat,
84586 },
84587 {
84588 .procname = "rdma_stat_sq_poll",
84589 .data = &rdma_stat_sq_poll,
84590 - .maxlen = sizeof(atomic_t),
84591 + .maxlen = sizeof(atomic_unchecked_t),
84592 .mode = 0644,
84593 .proc_handler = &read_reset_stat,
84594 },
84595 {
84596 .procname = "rdma_stat_sq_prod",
84597 .data = &rdma_stat_sq_prod,
84598 - .maxlen = sizeof(atomic_t),
84599 + .maxlen = sizeof(atomic_unchecked_t),
84600 .mode = 0644,
84601 .proc_handler = &read_reset_stat,
84602 },
84603 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84604 index 9e88438..8ed5cf0 100644
84605 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84606 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84607 @@ -495,7 +495,7 @@ next_sge:
84608 svc_rdma_put_context(ctxt, 0);
84609 goto out;
84610 }
84611 - atomic_inc(&rdma_stat_read);
84612 + atomic_inc_unchecked(&rdma_stat_read);
84613
84614 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
84615 chl_map->ch[ch_no].count -= read_wr.num_sge;
84616 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84617 dto_q);
84618 list_del_init(&ctxt->dto_q);
84619 } else {
84620 - atomic_inc(&rdma_stat_rq_starve);
84621 + atomic_inc_unchecked(&rdma_stat_rq_starve);
84622 clear_bit(XPT_DATA, &xprt->xpt_flags);
84623 ctxt = NULL;
84624 }
84625 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84626 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
84627 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
84628 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
84629 - atomic_inc(&rdma_stat_recv);
84630 + atomic_inc_unchecked(&rdma_stat_recv);
84631
84632 /* Build up the XDR from the receive buffers. */
84633 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
84634 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84635 index f11be72..7aad4e8 100644
84636 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84637 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84638 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
84639 write_wr.wr.rdma.remote_addr = to;
84640
84641 /* Post It */
84642 - atomic_inc(&rdma_stat_write);
84643 + atomic_inc_unchecked(&rdma_stat_write);
84644 if (svc_rdma_send(xprt, &write_wr))
84645 goto err;
84646 return 0;
84647 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84648 index 3fa5751..030ba89 100644
84649 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
84650 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84651 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84652 return;
84653
84654 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
84655 - atomic_inc(&rdma_stat_rq_poll);
84656 + atomic_inc_unchecked(&rdma_stat_rq_poll);
84657
84658 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
84659 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
84660 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84661 }
84662
84663 if (ctxt)
84664 - atomic_inc(&rdma_stat_rq_prod);
84665 + atomic_inc_unchecked(&rdma_stat_rq_prod);
84666
84667 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
84668 /*
84669 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84670 return;
84671
84672 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
84673 - atomic_inc(&rdma_stat_sq_poll);
84674 + atomic_inc_unchecked(&rdma_stat_sq_poll);
84675 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
84676 if (wc.status != IB_WC_SUCCESS)
84677 /* Close the transport */
84678 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84679 }
84680
84681 if (ctxt)
84682 - atomic_inc(&rdma_stat_sq_prod);
84683 + atomic_inc_unchecked(&rdma_stat_sq_prod);
84684 }
84685
84686 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
84687 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
84688 spin_lock_bh(&xprt->sc_lock);
84689 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
84690 spin_unlock_bh(&xprt->sc_lock);
84691 - atomic_inc(&rdma_stat_sq_starve);
84692 + atomic_inc_unchecked(&rdma_stat_sq_starve);
84693
84694 /* See if we can opportunistically reap SQ WR to make room */
84695 sq_cq_reap(xprt);
84696 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
84697 index 0b15d72..7934fbb 100644
84698 --- a/net/sysctl_net.c
84699 +++ b/net/sysctl_net.c
84700 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
84701 struct ctl_table *table)
84702 {
84703 /* Allow network administrator to have same access as root. */
84704 - if (capable(CAP_NET_ADMIN)) {
84705 + if (capable_nolog(CAP_NET_ADMIN)) {
84706 int mode = (table->mode >> 6) & 7;
84707 return (mode << 6) | (mode << 3) | mode;
84708 }
84709 diff --git a/net/tipc/link.c b/net/tipc/link.c
84710 index dd4c18b..f40d38d 100644
84711 --- a/net/tipc/link.c
84712 +++ b/net/tipc/link.c
84713 @@ -1418,7 +1418,7 @@ again:
84714
84715 if (!sect_rest) {
84716 sect_rest = msg_sect[++curr_sect].iov_len;
84717 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
84718 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
84719 }
84720
84721 if (sect_rest < fragm_rest)
84722 @@ -1437,7 +1437,7 @@ error:
84723 }
84724 } else
84725 skb_copy_to_linear_data_offset(buf, fragm_crs,
84726 - sect_crs, sz);
84727 + (const void __force_kernel *)sect_crs, sz);
84728 sect_crs += sz;
84729 sect_rest -= sz;
84730 fragm_crs += sz;
84731 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
84732 index 0747d8a..e8bf3f3 100644
84733 --- a/net/tipc/subscr.c
84734 +++ b/net/tipc/subscr.c
84735 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
84736 {
84737 struct iovec msg_sect;
84738
84739 - msg_sect.iov_base = (void *)&sub->evt;
84740 + msg_sect.iov_base = (void __force_user *)&sub->evt;
84741 msg_sect.iov_len = sizeof(struct tipc_event);
84742
84743 sub->evt.event = htohl(event, sub->swap);
84744 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
84745 index db8d51a..608692d 100644
84746 --- a/net/unix/af_unix.c
84747 +++ b/net/unix/af_unix.c
84748 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
84749 err = -ECONNREFUSED;
84750 if (!S_ISSOCK(inode->i_mode))
84751 goto put_fail;
84752 +
84753 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
84754 + err = -EACCES;
84755 + goto put_fail;
84756 + }
84757 +
84758 u = unix_find_socket_byinode(net, inode);
84759 if (!u)
84760 goto put_fail;
84761 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
84762 if (u) {
84763 struct dentry *dentry;
84764 dentry = unix_sk(u)->dentry;
84765 +
84766 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
84767 + err = -EPERM;
84768 + sock_put(u);
84769 + goto fail;
84770 + }
84771 +
84772 if (dentry)
84773 touch_atime(unix_sk(u)->mnt, dentry);
84774 } else
84775 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
84776 err = security_path_mknod(&nd.path, dentry, mode, 0);
84777 if (err)
84778 goto out_mknod_drop_write;
84779 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
84780 + err = -EACCES;
84781 + goto out_mknod_drop_write;
84782 + }
84783 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
84784 out_mknod_drop_write:
84785 mnt_drop_write(nd.path.mnt);
84786 if (err)
84787 goto out_mknod_dput;
84788 +
84789 + gr_handle_create(dentry, nd.path.mnt);
84790 +
84791 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
84792 dput(nd.path.dentry);
84793 nd.path.dentry = dentry;
84794 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
84795 unix_state_lock(s);
84796
84797 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
84798 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84799 + NULL,
84800 +#else
84801 s,
84802 +#endif
84803 atomic_read(&s->sk_refcnt),
84804 0,
84805 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
84806 diff --git a/net/wireless/core.h b/net/wireless/core.h
84807 index 376798f..109a61f 100644
84808 --- a/net/wireless/core.h
84809 +++ b/net/wireless/core.h
84810 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
84811 struct mutex mtx;
84812
84813 /* rfkill support */
84814 - struct rfkill_ops rfkill_ops;
84815 + rfkill_ops_no_const rfkill_ops;
84816 struct rfkill *rfkill;
84817 struct work_struct rfkill_sync;
84818
84819 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
84820 index a2e4c60..0979cbe 100644
84821 --- a/net/wireless/wext.c
84822 +++ b/net/wireless/wext.c
84823 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84824 */
84825
84826 /* Support for very large requests */
84827 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
84828 - (user_length > descr->max_tokens)) {
84829 + if (user_length > descr->max_tokens) {
84830 /* Allow userspace to GET more than max so
84831 * we can support any size GET requests.
84832 * There is still a limit : -ENOMEM.
84833 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84834 }
84835 }
84836
84837 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
84838 - /*
84839 - * If this is a GET, but not NOMAX, it means that the extra
84840 - * data is not bounded by userspace, but by max_tokens. Thus
84841 - * set the length to max_tokens. This matches the extra data
84842 - * allocation.
84843 - * The driver should fill it with the number of tokens it
84844 - * provided, and it may check iwp->length rather than having
84845 - * knowledge of max_tokens. If the driver doesn't change the
84846 - * iwp->length, this ioctl just copies back max_token tokens
84847 - * filled with zeroes. Hopefully the driver isn't claiming
84848 - * them to be valid data.
84849 - */
84850 - iwp->length = descr->max_tokens;
84851 - }
84852 -
84853 err = handler(dev, info, (union iwreq_data *) iwp, extra);
84854
84855 iwp->length += essid_compat;
84856 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
84857 index cb81ca3..e15d49a 100644
84858 --- a/net/xfrm/xfrm_policy.c
84859 +++ b/net/xfrm/xfrm_policy.c
84860 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
84861 hlist_add_head(&policy->bydst, chain);
84862 xfrm_pol_hold(policy);
84863 net->xfrm.policy_count[dir]++;
84864 - atomic_inc(&flow_cache_genid);
84865 + atomic_inc_unchecked(&flow_cache_genid);
84866 if (delpol)
84867 __xfrm_policy_unlink(delpol, dir);
84868 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
84869 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
84870 write_unlock_bh(&xfrm_policy_lock);
84871
84872 if (ret && delete) {
84873 - atomic_inc(&flow_cache_genid);
84874 + atomic_inc_unchecked(&flow_cache_genid);
84875 xfrm_policy_kill(ret);
84876 }
84877 return ret;
84878 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
84879 write_unlock_bh(&xfrm_policy_lock);
84880
84881 if (ret && delete) {
84882 - atomic_inc(&flow_cache_genid);
84883 + atomic_inc_unchecked(&flow_cache_genid);
84884 xfrm_policy_kill(ret);
84885 }
84886 return ret;
84887 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
84888 }
84889
84890 }
84891 - atomic_inc(&flow_cache_genid);
84892 + atomic_inc_unchecked(&flow_cache_genid);
84893 out:
84894 write_unlock_bh(&xfrm_policy_lock);
84895 return err;
84896 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
84897 write_unlock_bh(&xfrm_policy_lock);
84898 if (pol) {
84899 if (dir < XFRM_POLICY_MAX)
84900 - atomic_inc(&flow_cache_genid);
84901 + atomic_inc_unchecked(&flow_cache_genid);
84902 xfrm_policy_kill(pol);
84903 return 0;
84904 }
84905 @@ -1477,7 +1477,7 @@ free_dst:
84906 goto out;
84907 }
84908
84909 -static int inline
84910 +static inline int
84911 xfrm_dst_alloc_copy(void **target, void *src, int size)
84912 {
84913 if (!*target) {
84914 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
84915 return 0;
84916 }
84917
84918 -static int inline
84919 +static inline int
84920 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84921 {
84922 #ifdef CONFIG_XFRM_SUB_POLICY
84923 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84924 #endif
84925 }
84926
84927 -static int inline
84928 +static inline int
84929 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
84930 {
84931 #ifdef CONFIG_XFRM_SUB_POLICY
84932 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
84933 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
84934
84935 restart:
84936 - genid = atomic_read(&flow_cache_genid);
84937 + genid = atomic_read_unchecked(&flow_cache_genid);
84938 policy = NULL;
84939 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
84940 pols[pi] = NULL;
84941 @@ -1680,7 +1680,7 @@ restart:
84942 goto error;
84943 }
84944 if (nx == -EAGAIN ||
84945 - genid != atomic_read(&flow_cache_genid)) {
84946 + genid != atomic_read_unchecked(&flow_cache_genid)) {
84947 xfrm_pols_put(pols, npols);
84948 goto restart;
84949 }
84950 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
84951 index b95a2d6..85c4d78 100644
84952 --- a/net/xfrm/xfrm_user.c
84953 +++ b/net/xfrm/xfrm_user.c
84954 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
84955 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
84956 int i;
84957
84958 + pax_track_stack();
84959 +
84960 if (xp->xfrm_nr == 0)
84961 return 0;
84962
84963 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
84964 int err;
84965 int n = 0;
84966
84967 + pax_track_stack();
84968 +
84969 if (attrs[XFRMA_MIGRATE] == NULL)
84970 return -EINVAL;
84971
84972 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
84973 index 45b7d56..19e828c 100644
84974 --- a/samples/kobject/kset-example.c
84975 +++ b/samples/kobject/kset-example.c
84976 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
84977 }
84978
84979 /* Our custom sysfs_ops that we will associate with our ktype later on */
84980 -static struct sysfs_ops foo_sysfs_ops = {
84981 +static const struct sysfs_ops foo_sysfs_ops = {
84982 .show = foo_attr_show,
84983 .store = foo_attr_store,
84984 };
84985 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
84986 index 341b589..405aed3 100644
84987 --- a/scripts/Makefile.build
84988 +++ b/scripts/Makefile.build
84989 @@ -59,7 +59,7 @@ endif
84990 endif
84991
84992 # Do not include host rules unless needed
84993 -ifneq ($(hostprogs-y)$(hostprogs-m),)
84994 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
84995 include scripts/Makefile.host
84996 endif
84997
84998 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
84999 index 6f89fbb..53adc9c 100644
85000 --- a/scripts/Makefile.clean
85001 +++ b/scripts/Makefile.clean
85002 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
85003 __clean-files := $(extra-y) $(always) \
85004 $(targets) $(clean-files) \
85005 $(host-progs) \
85006 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
85007 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
85008 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
85009
85010 # as clean-files is given relative to the current directory, this adds
85011 # a $(obj) prefix, except for absolute paths
85012 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
85013 index 1ac414f..a1c1451 100644
85014 --- a/scripts/Makefile.host
85015 +++ b/scripts/Makefile.host
85016 @@ -31,6 +31,7 @@
85017 # Note: Shared libraries consisting of C++ files are not supported
85018
85019 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
85020 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
85021
85022 # C code
85023 # Executables compiled from a single .c file
85024 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
85025 # Shared libaries (only .c supported)
85026 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
85027 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
85028 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
85029 # Remove .so files from "xxx-objs"
85030 host-cobjs := $(filter-out %.so,$(host-cobjs))
85031
85032 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
85033 index 6bf21f8..c0546b3 100644
85034 --- a/scripts/basic/fixdep.c
85035 +++ b/scripts/basic/fixdep.c
85036 @@ -162,7 +162,7 @@ static void grow_config(int len)
85037 /*
85038 * Lookup a value in the configuration string.
85039 */
85040 -static int is_defined_config(const char * name, int len)
85041 +static int is_defined_config(const char * name, unsigned int len)
85042 {
85043 const char * pconfig;
85044 const char * plast = str_config + len_config - len;
85045 @@ -199,7 +199,7 @@ static void clear_config(void)
85046 /*
85047 * Record the use of a CONFIG_* word.
85048 */
85049 -static void use_config(char *m, int slen)
85050 +static void use_config(char *m, unsigned int slen)
85051 {
85052 char s[PATH_MAX];
85053 char *p;
85054 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
85055
85056 static void parse_config_file(char *map, size_t len)
85057 {
85058 - int *end = (int *) (map + len);
85059 + unsigned int *end = (unsigned int *) (map + len);
85060 /* start at +1, so that p can never be < map */
85061 - int *m = (int *) map + 1;
85062 + unsigned int *m = (unsigned int *) map + 1;
85063 char *p, *q;
85064
85065 for (; m < end; m++) {
85066 @@ -371,7 +371,7 @@ static void print_deps(void)
85067 static void traps(void)
85068 {
85069 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
85070 - int *p = (int *)test;
85071 + unsigned int *p = (unsigned int *)test;
85072
85073 if (*p != INT_CONF) {
85074 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
85075 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
85076 new file mode 100644
85077 index 0000000..8729101
85078 --- /dev/null
85079 +++ b/scripts/gcc-plugin.sh
85080 @@ -0,0 +1,2 @@
85081 +#!/bin/sh
85082 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
85083 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
85084 index 62a9025..65b82ad 100644
85085 --- a/scripts/mod/file2alias.c
85086 +++ b/scripts/mod/file2alias.c
85087 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
85088 unsigned long size, unsigned long id_size,
85089 void *symval)
85090 {
85091 - int i;
85092 + unsigned int i;
85093
85094 if (size % id_size || size < id_size) {
85095 if (cross_build != 0)
85096 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
85097 /* USB is special because the bcdDevice can be matched against a numeric range */
85098 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
85099 static void do_usb_entry(struct usb_device_id *id,
85100 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
85101 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
85102 unsigned char range_lo, unsigned char range_hi,
85103 struct module *mod)
85104 {
85105 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
85106 {
85107 unsigned int devlo, devhi;
85108 unsigned char chi, clo;
85109 - int ndigits;
85110 + unsigned int ndigits;
85111
85112 id->match_flags = TO_NATIVE(id->match_flags);
85113 id->idVendor = TO_NATIVE(id->idVendor);
85114 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
85115 for (i = 0; i < count; i++) {
85116 const char *id = (char *)devs[i].id;
85117 char acpi_id[sizeof(devs[0].id)];
85118 - int j;
85119 + unsigned int j;
85120
85121 buf_printf(&mod->dev_table_buf,
85122 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
85123 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
85124
85125 for (j = 0; j < PNP_MAX_DEVICES; j++) {
85126 const char *id = (char *)card->devs[j].id;
85127 - int i2, j2;
85128 + unsigned int i2, j2;
85129 int dup = 0;
85130
85131 if (!id[0])
85132 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
85133 /* add an individual alias for every device entry */
85134 if (!dup) {
85135 char acpi_id[sizeof(card->devs[0].id)];
85136 - int k;
85137 + unsigned int k;
85138
85139 buf_printf(&mod->dev_table_buf,
85140 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
85141 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
85142 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
85143 char *alias)
85144 {
85145 - int i, j;
85146 + unsigned int i, j;
85147
85148 sprintf(alias, "dmi*");
85149
85150 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
85151 index 03efeab..0888989 100644
85152 --- a/scripts/mod/modpost.c
85153 +++ b/scripts/mod/modpost.c
85154 @@ -835,6 +835,7 @@ enum mismatch {
85155 INIT_TO_EXIT,
85156 EXIT_TO_INIT,
85157 EXPORT_TO_INIT_EXIT,
85158 + DATA_TO_TEXT
85159 };
85160
85161 struct sectioncheck {
85162 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
85163 .fromsec = { "__ksymtab*", NULL },
85164 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
85165 .mismatch = EXPORT_TO_INIT_EXIT
85166 +},
85167 +/* Do not reference code from writable data */
85168 +{
85169 + .fromsec = { DATA_SECTIONS, NULL },
85170 + .tosec = { TEXT_SECTIONS, NULL },
85171 + .mismatch = DATA_TO_TEXT
85172 }
85173 };
85174
85175 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
85176 continue;
85177 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
85178 continue;
85179 - if (sym->st_value == addr)
85180 - return sym;
85181 /* Find a symbol nearby - addr are maybe negative */
85182 d = sym->st_value - addr;
85183 + if (d == 0)
85184 + return sym;
85185 if (d < 0)
85186 d = addr - sym->st_value;
85187 if (d < distance) {
85188 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
85189 "Fix this by removing the %sannotation of %s "
85190 "or drop the export.\n",
85191 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
85192 + case DATA_TO_TEXT:
85193 +/*
85194 + fprintf(stderr,
85195 + "The variable %s references\n"
85196 + "the %s %s%s%s\n",
85197 + fromsym, to, sec2annotation(tosec), tosym, to_p);
85198 +*/
85199 + break;
85200 case NO_MISMATCH:
85201 /* To get warnings on missing members */
85202 break;
85203 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
85204 static void check_sec_ref(struct module *mod, const char *modname,
85205 struct elf_info *elf)
85206 {
85207 - int i;
85208 + unsigned int i;
85209 Elf_Shdr *sechdrs = elf->sechdrs;
85210
85211 /* Walk through all sections */
85212 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
85213 va_end(ap);
85214 }
85215
85216 -void buf_write(struct buffer *buf, const char *s, int len)
85217 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
85218 {
85219 if (buf->size - buf->pos < len) {
85220 buf->size += len + SZ;
85221 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
85222 if (fstat(fileno(file), &st) < 0)
85223 goto close_write;
85224
85225 - if (st.st_size != b->pos)
85226 + if (st.st_size != (off_t)b->pos)
85227 goto close_write;
85228
85229 tmp = NOFAIL(malloc(b->pos));
85230 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
85231 index 09f58e3..4b66092 100644
85232 --- a/scripts/mod/modpost.h
85233 +++ b/scripts/mod/modpost.h
85234 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
85235
85236 struct buffer {
85237 char *p;
85238 - int pos;
85239 - int size;
85240 + unsigned int pos;
85241 + unsigned int size;
85242 };
85243
85244 void __attribute__((format(printf, 2, 3)))
85245 buf_printf(struct buffer *buf, const char *fmt, ...);
85246
85247 void
85248 -buf_write(struct buffer *buf, const char *s, int len);
85249 +buf_write(struct buffer *buf, const char *s, unsigned int len);
85250
85251 struct module {
85252 struct module *next;
85253 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
85254 index ecf9c7d..d52b38e 100644
85255 --- a/scripts/mod/sumversion.c
85256 +++ b/scripts/mod/sumversion.c
85257 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
85258 goto out;
85259 }
85260
85261 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
85262 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
85263 warn("writing sum in %s failed: %s\n",
85264 filename, strerror(errno));
85265 goto out;
85266 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
85267 index 47bdd2f..d4d4e93 100755
85268 --- a/scripts/package/mkspec
85269 +++ b/scripts/package/mkspec
85270 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
85271 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
85272 echo "%endif"
85273
85274 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
85275 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
85276 echo "%ifarch ia64"
85277 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
85278 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
85279 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
85280 index 5c11312..72742b5 100644
85281 --- a/scripts/pnmtologo.c
85282 +++ b/scripts/pnmtologo.c
85283 @@ -237,14 +237,14 @@ static void write_header(void)
85284 fprintf(out, " * Linux logo %s\n", logoname);
85285 fputs(" */\n\n", out);
85286 fputs("#include <linux/linux_logo.h>\n\n", out);
85287 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
85288 + fprintf(out, "static unsigned char %s_data[] = {\n",
85289 logoname);
85290 }
85291
85292 static void write_footer(void)
85293 {
85294 fputs("\n};\n\n", out);
85295 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
85296 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
85297 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
85298 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
85299 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
85300 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
85301 fputs("\n};\n\n", out);
85302
85303 /* write logo clut */
85304 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
85305 + fprintf(out, "static unsigned char %s_clut[] = {\n",
85306 logoname);
85307 write_hex_cnt = 0;
85308 for (i = 0; i < logo_clutsize; i++) {
85309 diff --git a/scripts/tags.sh b/scripts/tags.sh
85310 index d52f7a0..269eb1b 100755
85311 --- a/scripts/tags.sh
85312 +++ b/scripts/tags.sh
85313 @@ -93,6 +93,11 @@ docscope()
85314 cscope -b -f cscope.out
85315 }
85316
85317 +dogtags()
85318 +{
85319 + all_sources | gtags -f -
85320 +}
85321 +
85322 exuberant()
85323 {
85324 all_sources | xargs $1 -a \
85325 @@ -164,6 +169,10 @@ case "$1" in
85326 docscope
85327 ;;
85328
85329 + "gtags")
85330 + dogtags
85331 + ;;
85332 +
85333 "tags")
85334 rm -f tags
85335 xtags ctags
85336 diff --git a/security/Kconfig b/security/Kconfig
85337 index fb363cd..886ace4 100644
85338 --- a/security/Kconfig
85339 +++ b/security/Kconfig
85340 @@ -4,6 +4,626 @@
85341
85342 menu "Security options"
85343
85344 +source grsecurity/Kconfig
85345 +
85346 +menu "PaX"
85347 +
85348 + config ARCH_TRACK_EXEC_LIMIT
85349 + bool
85350 +
85351 + config PAX_KERNEXEC_PLUGIN
85352 + bool
85353 +
85354 + config PAX_PER_CPU_PGD
85355 + bool
85356 +
85357 + config TASK_SIZE_MAX_SHIFT
85358 + int
85359 + depends on X86_64
85360 + default 47 if !PAX_PER_CPU_PGD
85361 + default 42 if PAX_PER_CPU_PGD
85362 +
85363 + config PAX_ENABLE_PAE
85364 + bool
85365 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
85366 +
85367 +config PAX
85368 + bool "Enable various PaX features"
85369 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
85370 + help
85371 + This allows you to enable various PaX features. PaX adds
85372 + intrusion prevention mechanisms to the kernel that reduce
85373 + the risks posed by exploitable memory corruption bugs.
85374 +
85375 +menu "PaX Control"
85376 + depends on PAX
85377 +
85378 +config PAX_SOFTMODE
85379 + bool 'Support soft mode'
85380 + help
85381 + Enabling this option will allow you to run PaX in soft mode, that
85382 + is, PaX features will not be enforced by default, only on executables
85383 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
85384 + support as they are the only way to mark executables for soft mode use.
85385 +
85386 + Soft mode can be activated by using the "pax_softmode=1" kernel command
85387 + line option on boot. Furthermore you can control various PaX features
85388 + at runtime via the entries in /proc/sys/kernel/pax.
85389 +
85390 +config PAX_EI_PAX
85391 + bool 'Use legacy ELF header marking'
85392 + help
85393 + Enabling this option will allow you to control PaX features on
85394 + a per executable basis via the 'chpax' utility available at
85395 + http://pax.grsecurity.net/. The control flags will be read from
85396 + an otherwise reserved part of the ELF header. This marking has
85397 + numerous drawbacks (no support for soft-mode, toolchain does not
85398 + know about the non-standard use of the ELF header) therefore it
85399 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
85400 + support.
85401 +
85402 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85403 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
85404 + option otherwise they will not get any protection.
85405 +
85406 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
85407 + support as well, they will override the legacy EI_PAX marks.
85408 +
85409 +config PAX_PT_PAX_FLAGS
85410 + bool 'Use ELF program header marking'
85411 + help
85412 + Enabling this option will allow you to control PaX features on
85413 + a per executable basis via the 'paxctl' utility available at
85414 + http://pax.grsecurity.net/. The control flags will be read from
85415 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
85416 + has the benefits of supporting both soft mode and being fully
85417 + integrated into the toolchain (the binutils patch is available
85418 + from http://pax.grsecurity.net).
85419 +
85420 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85421 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85422 + support otherwise they will not get any protection.
85423 +
85424 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85425 + must make sure that the marks are the same if a binary has both marks.
85426 +
85427 + Note that if you enable the legacy EI_PAX marking support as well,
85428 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
85429 +
85430 +config PAX_XATTR_PAX_FLAGS
85431 + bool 'Use filesystem extended attributes marking'
85432 + depends on EXPERT
85433 + select CIFS_XATTR if CIFS
85434 + select EXT2_FS_XATTR if EXT2_FS
85435 + select EXT3_FS_XATTR if EXT3_FS
85436 + select EXT4_FS_XATTR if EXT4_FS
85437 + select JFFS2_FS_XATTR if JFFS2_FS
85438 + select REISERFS_FS_XATTR if REISERFS_FS
85439 + select UBIFS_FS_XATTR if UBIFS_FS
85440 + help
85441 + Enabling this option will allow you to control PaX features on
85442 + a per executable basis via the 'setfattr' utility. The control
85443 + flags will be read from the user.pax.flags extended attribute of
85444 + the file. This marking has the benefit of supporting binary-only
85445 + applications that self-check themselves (e.g., skype) and would
85446 + not tolerate chpax/paxctl changes. The main drawback is that
85447 + extended attributes are not supported by some filesystems (e.g.,
85448 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
85449 + filesystems will lose the extended attributes and these PaX markings.
85450 +
85451 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85452 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85453 + support otherwise they will not get any protection.
85454 +
85455 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85456 + must make sure that the marks are the same if a binary has both marks.
85457 +
85458 + Note that if you enable the legacy EI_PAX marking support as well,
85459 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
85460 +
85461 +choice
85462 + prompt 'MAC system integration'
85463 + default PAX_HAVE_ACL_FLAGS
85464 + help
85465 + Mandatory Access Control systems have the option of controlling
85466 + PaX flags on a per executable basis, choose the method supported
85467 + by your particular system.
85468 +
85469 + - "none": if your MAC system does not interact with PaX,
85470 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
85471 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
85472 +
85473 + NOTE: this option is for developers/integrators only.
85474 +
85475 + config PAX_NO_ACL_FLAGS
85476 + bool 'none'
85477 +
85478 + config PAX_HAVE_ACL_FLAGS
85479 + bool 'direct'
85480 +
85481 + config PAX_HOOK_ACL_FLAGS
85482 + bool 'hook'
85483 +endchoice
85484 +
85485 +endmenu
85486 +
85487 +menu "Non-executable pages"
85488 + depends on PAX
85489 +
85490 +config PAX_NOEXEC
85491 + bool "Enforce non-executable pages"
85492 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
85493 + help
85494 + By design some architectures do not allow for protecting memory
85495 + pages against execution or even if they do, Linux does not make
85496 + use of this feature. In practice this means that if a page is
85497 + readable (such as the stack or heap) it is also executable.
85498 +
85499 + There is a well known exploit technique that makes use of this
85500 + fact and a common programming mistake where an attacker can
85501 + introduce code of his choice somewhere in the attacked program's
85502 + memory (typically the stack or the heap) and then execute it.
85503 +
85504 + If the attacked program was running with different (typically
85505 + higher) privileges than that of the attacker, then he can elevate
85506 + his own privilege level (e.g. get a root shell, write to files for
85507 + which he does not have write access to, etc).
85508 +
85509 + Enabling this option will let you choose from various features
85510 + that prevent the injection and execution of 'foreign' code in
85511 + a program.
85512 +
85513 + This will also break programs that rely on the old behaviour and
85514 + expect that dynamically allocated memory via the malloc() family
85515 + of functions is executable (which it is not). Notable examples
85516 + are the XFree86 4.x server, the java runtime and wine.
85517 +
85518 +config PAX_PAGEEXEC
85519 + bool "Paging based non-executable pages"
85520 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
85521 + select S390_SWITCH_AMODE if S390
85522 + select S390_EXEC_PROTECT if S390
85523 + select ARCH_TRACK_EXEC_LIMIT if X86_32
85524 + help
85525 + This implementation is based on the paging feature of the CPU.
85526 + On i386 without hardware non-executable bit support there is a
85527 + variable but usually low performance impact, however on Intel's
85528 + P4 core based CPUs it is very high so you should not enable this
85529 + for kernels meant to be used on such CPUs.
85530 +
85531 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
85532 + with hardware non-executable bit support there is no performance
85533 + impact, on ppc the impact is negligible.
85534 +
85535 + Note that several architectures require various emulations due to
85536 + badly designed userland ABIs, this will cause a performance impact
85537 + but will disappear as soon as userland is fixed. For example, ppc
85538 + userland MUST have been built with secure-plt by a recent toolchain.
85539 +
85540 +config PAX_SEGMEXEC
85541 + bool "Segmentation based non-executable pages"
85542 + depends on PAX_NOEXEC && X86_32
85543 + help
85544 + This implementation is based on the segmentation feature of the
85545 + CPU and has a very small performance impact, however applications
85546 + will be limited to a 1.5 GB address space instead of the normal
85547 + 3 GB.
85548 +
85549 +config PAX_EMUTRAMP
85550 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
85551 + default y if PARISC
85552 + help
85553 + There are some programs and libraries that for one reason or
85554 + another attempt to execute special small code snippets from
85555 + non-executable memory pages. Most notable examples are the
85556 + signal handler return code generated by the kernel itself and
85557 + the GCC trampolines.
85558 +
85559 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
85560 + such programs will no longer work under your kernel.
85561 +
85562 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
85563 + utilities to enable trampoline emulation for the affected programs
85564 + yet still have the protection provided by the non-executable pages.
85565 +
85566 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
85567 + your system will not even boot.
85568 +
85569 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
85570 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
85571 + for the affected files.
85572 +
85573 + NOTE: enabling this feature *may* open up a loophole in the
85574 + protection provided by non-executable pages that an attacker
85575 + could abuse. Therefore the best solution is to not have any
85576 + files on your system that would require this option. This can
85577 + be achieved by not using libc5 (which relies on the kernel
85578 + signal handler return code) and not using or rewriting programs
85579 + that make use of the nested function implementation of GCC.
85580 + Skilled users can just fix GCC itself so that it implements
85581 + nested function calls in a way that does not interfere with PaX.
85582 +
85583 +config PAX_EMUSIGRT
85584 + bool "Automatically emulate sigreturn trampolines"
85585 + depends on PAX_EMUTRAMP && PARISC
85586 + default y
85587 + help
85588 + Enabling this option will have the kernel automatically detect
85589 + and emulate signal return trampolines executing on the stack
85590 + that would otherwise lead to task termination.
85591 +
85592 + This solution is intended as a temporary one for users with
85593 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
85594 + Modula-3 runtime, etc) or executables linked to such, basically
85595 + everything that does not specify its own SA_RESTORER function in
85596 + normal executable memory like glibc 2.1+ does.
85597 +
85598 + On parisc you MUST enable this option, otherwise your system will
85599 + not even boot.
85600 +
85601 + NOTE: this feature cannot be disabled on a per executable basis
85602 + and since it *does* open up a loophole in the protection provided
85603 + by non-executable pages, the best solution is to not have any
85604 + files on your system that would require this option.
85605 +
85606 +config PAX_MPROTECT
85607 + bool "Restrict mprotect()"
85608 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
85609 + help
85610 + Enabling this option will prevent programs from
85611 + - changing the executable status of memory pages that were
85612 + not originally created as executable,
85613 + - making read-only executable pages writable again,
85614 + - creating executable pages from anonymous memory,
85615 + - making read-only-after-relocations (RELRO) data pages writable again.
85616 +
85617 + You should say Y here to complete the protection provided by
85618 + the enforcement of non-executable pages.
85619 +
85620 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85621 + this feature on a per file basis.
85622 +
85623 +config PAX_MPROTECT_COMPAT
85624 + bool "Use legacy/compat protection demoting (read help)"
85625 + depends on PAX_MPROTECT
85626 + default n
85627 + help
85628 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
85629 + by sending the proper error code to the application. For some broken
85630 + userland, this can cause problems with Python or other applications. The
85631 + current implementation however allows for applications like clamav to
85632 + detect if JIT compilation/execution is allowed and to fall back gracefully
85633 + to an interpreter-based mode if it does not. While we encourage everyone
85634 + to use the current implementation as-is and push upstream to fix broken
85635 + userland (note that the RWX logging option can assist with this), in some
85636 + environments this may not be possible. Having to disable MPROTECT
85637 + completely on certain binaries reduces the security benefit of PaX,
85638 + so this option is provided for those environments to revert to the old
85639 + behavior.
85640 +
85641 +config PAX_ELFRELOCS
85642 + bool "Allow ELF text relocations (read help)"
85643 + depends on PAX_MPROTECT
85644 + default n
85645 + help
85646 + Non-executable pages and mprotect() restrictions are effective
85647 + in preventing the introduction of new executable code into an
85648 + attacked task's address space. There remain only two venues
85649 + for this kind of attack: if the attacker can execute already
85650 + existing code in the attacked task then he can either have it
85651 + create and mmap() a file containing his code or have it mmap()
85652 + an already existing ELF library that does not have position
85653 + independent code in it and use mprotect() on it to make it
85654 + writable and copy his code there. While protecting against
85655 + the former approach is beyond PaX, the latter can be prevented
85656 + by having only PIC ELF libraries on one's system (which do not
85657 + need to relocate their code). If you are sure this is your case,
85658 + as is the case with all modern Linux distributions, then leave
85659 + this option disabled. You should say 'n' here.
85660 +
85661 +config PAX_ETEXECRELOCS
85662 + bool "Allow ELF ET_EXEC text relocations"
85663 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
85664 + select PAX_ELFRELOCS
85665 + default y
85666 + help
85667 + On some architectures there are incorrectly created applications
85668 + that require text relocations and would not work without enabling
85669 + this option. If you are an alpha, ia64 or parisc user, you should
85670 + enable this option and disable it once you have made sure that
85671 + none of your applications need it.
85672 +
85673 +config PAX_EMUPLT
85674 + bool "Automatically emulate ELF PLT"
85675 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
85676 + default y
85677 + help
85678 + Enabling this option will have the kernel automatically detect
85679 + and emulate the Procedure Linkage Table entries in ELF files.
85680 + On some architectures such entries are in writable memory, and
85681 + become non-executable leading to task termination. Therefore
85682 + it is mandatory that you enable this option on alpha, parisc,
85683 + sparc and sparc64, otherwise your system would not even boot.
85684 +
85685 + NOTE: this feature *does* open up a loophole in the protection
85686 + provided by the non-executable pages, therefore the proper
85687 + solution is to modify the toolchain to produce a PLT that does
85688 + not need to be writable.
85689 +
85690 +config PAX_DLRESOLVE
85691 + bool 'Emulate old glibc resolver stub'
85692 + depends on PAX_EMUPLT && SPARC
85693 + default n
85694 + help
85695 + This option is needed if userland has an old glibc (before 2.4)
85696 + that puts a 'save' instruction into the runtime generated resolver
85697 + stub that needs special emulation.
85698 +
85699 +config PAX_KERNEXEC
85700 + bool "Enforce non-executable kernel pages"
85701 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
85702 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
85703 + select PAX_KERNEXEC_PLUGIN if X86_64
85704 + help
85705 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
85706 + that is, enabling this option will make it harder to inject
85707 + and execute 'foreign' code in kernel memory itself.
85708 +
85709 + Note that on x86_64 kernels there is a known regression when
85710 + this feature and KVM/VMX are both enabled in the host kernel.
85711 +
85712 +choice
85713 + prompt "Return Address Instrumentation Method"
85714 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
85715 + depends on PAX_KERNEXEC_PLUGIN
85716 + help
85717 + Select the method used to instrument function pointer dereferences.
85718 + Note that binary modules cannot be instrumented by this approach.
85719 +
85720 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
85721 + bool "bts"
85722 + help
85723 + This method is compatible with binary only modules but has
85724 + a higher runtime overhead.
85725 +
85726 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
85727 + bool "or"
85728 + depends on !PARAVIRT
85729 + help
85730 + This method is incompatible with binary only modules but has
85731 + a lower runtime overhead.
85732 +endchoice
85733 +
85734 +config PAX_KERNEXEC_PLUGIN_METHOD
85735 + string
85736 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
85737 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
85738 + default ""
85739 +
85740 +config PAX_KERNEXEC_MODULE_TEXT
85741 + int "Minimum amount of memory reserved for module code"
85742 + default "4"
85743 + depends on PAX_KERNEXEC && X86_32 && MODULES
85744 + help
85745 + Due to implementation details the kernel must reserve a fixed
85746 + amount of memory for module code at compile time that cannot be
85747 + changed at runtime. Here you can specify the minimum amount
85748 + in MB that will be reserved. Due to the same implementation
85749 + details this size will always be rounded up to the next 2/4 MB
85750 + boundary (depends on PAE) so the actually available memory for
85751 + module code will usually be more than this minimum.
85752 +
85753 + The default 4 MB should be enough for most users but if you have
85754 + an excessive number of modules (e.g., most distribution configs
85755 + compile many drivers as modules) or use huge modules such as
85756 + nvidia's kernel driver, you will need to adjust this amount.
85757 + A good rule of thumb is to look at your currently loaded kernel
85758 + modules and add up their sizes.
85759 +
85760 +endmenu
85761 +
85762 +menu "Address Space Layout Randomization"
85763 + depends on PAX
85764 +
85765 +config PAX_ASLR
85766 + bool "Address Space Layout Randomization"
85767 + help
85768 + Many if not most exploit techniques rely on the knowledge of
85769 + certain addresses in the attacked program. The following options
85770 + will allow the kernel to apply a certain amount of randomization
85771 + to specific parts of the program thereby forcing an attacker to
85772 + guess them in most cases. Any failed guess will most likely crash
85773 + the attacked program which allows the kernel to detect such attempts
85774 + and react on them. PaX itself provides no reaction mechanisms,
85775 + instead it is strongly encouraged that you make use of Nergal's
85776 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
85777 + (http://www.grsecurity.net/) built-in crash detection features or
85778 + develop one yourself.
85779 +
85780 + By saying Y here you can choose to randomize the following areas:
85781 + - top of the task's kernel stack
85782 + - top of the task's userland stack
85783 + - base address for mmap() requests that do not specify one
85784 + (this includes all libraries)
85785 + - base address of the main executable
85786 +
85787 + It is strongly recommended to say Y here as address space layout
85788 + randomization has negligible impact on performance yet it provides
85789 + a very effective protection.
85790 +
85791 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85792 + this feature on a per file basis.
85793 +
85794 +config PAX_RANDKSTACK
85795 + bool "Randomize kernel stack base"
85796 + depends on X86_TSC && X86
85797 + help
85798 + By saying Y here the kernel will randomize every task's kernel
85799 + stack on every system call. This will not only force an attacker
85800 + to guess it but also prevent him from making use of possible
85801 + leaked information about it.
85802 +
85803 + Since the kernel stack is a rather scarce resource, randomization
85804 + may cause unexpected stack overflows, therefore you should very
85805 + carefully test your system. Note that once enabled in the kernel
85806 + configuration, this feature cannot be disabled on a per file basis.
85807 +
85808 +config PAX_RANDUSTACK
85809 + bool "Randomize user stack base"
85810 + depends on PAX_ASLR
85811 + help
85812 + By saying Y here the kernel will randomize every task's userland
85813 + stack. The randomization is done in two steps where the second
85814 + one may apply a big amount of shift to the top of the stack and
85815 + cause problems for programs that want to use lots of memory (more
85816 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
85817 + For this reason the second step can be controlled by 'chpax' or
85818 + 'paxctl' on a per file basis.
85819 +
85820 +config PAX_RANDMMAP
85821 + bool "Randomize mmap() base"
85822 + depends on PAX_ASLR
85823 + help
85824 + By saying Y here the kernel will use a randomized base address for
85825 + mmap() requests that do not specify one themselves. As a result
85826 + all dynamically loaded libraries will appear at random addresses
85827 + and therefore be harder to exploit by a technique where an attacker
85828 + attempts to execute library code for his purposes (e.g. spawn a
85829 + shell from an exploited program that is running at an elevated
85830 + privilege level).
85831 +
85832 + Furthermore, if a program is relinked as a dynamic ELF file, its
85833 + base address will be randomized as well, completing the full
85834 + randomization of the address space layout. Attacking such programs
85835 + becomes a guess game. You can find an example of doing this at
85836 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
85837 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
85838 +
85839 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
85840 + feature on a per file basis.
85841 +
85842 +endmenu
85843 +
85844 +menu "Miscellaneous hardening features"
85845 +
85846 +config PAX_MEMORY_SANITIZE
85847 + bool "Sanitize all freed memory"
85848 + depends on !HIBERNATION
85849 + help
85850 + By saying Y here the kernel will erase memory pages as soon as they
85851 + are freed. This in turn reduces the lifetime of data stored in the
85852 + pages, making it less likely that sensitive information such as
85853 + passwords, cryptographic secrets, etc stay in memory for too long.
85854 +
85855 + This is especially useful for programs whose runtime is short, long
85856 + lived processes and the kernel itself benefit from this as long as
85857 + they operate on whole memory pages and ensure timely freeing of pages
85858 + that may hold sensitive information.
85859 +
85860 + The tradeoff is performance impact, on a single CPU system kernel
85861 + compilation sees a 3% slowdown, other systems and workloads may vary
85862 + and you are advised to test this feature on your expected workload
85863 + before deploying it.
85864 +
85865 + Note that this feature does not protect data stored in live pages,
85866 + e.g., process memory swapped to disk may stay there for a long time.
85867 +
85868 +config PAX_MEMORY_STACKLEAK
85869 + bool "Sanitize kernel stack"
85870 + depends on X86
85871 + help
85872 + By saying Y here the kernel will erase the kernel stack before it
85873 + returns from a system call. This in turn reduces the information
85874 + that a kernel stack leak bug can reveal.
85875 +
85876 + Note that such a bug can still leak information that was put on
85877 + the stack by the current system call (the one eventually triggering
85878 + the bug) but traces of earlier system calls on the kernel stack
85879 + cannot leak anymore.
85880 +
85881 + The tradeoff is performance impact, on a single CPU system kernel
85882 + compilation sees a 1% slowdown, other systems and workloads may vary
85883 + and you are advised to test this feature on your expected workload
85884 + before deploying it.
85885 +
85886 + Note: full support for this feature requires gcc with plugin support
85887 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
85888 + versions means that functions with large enough stack frames may
85889 + leave uninitialized memory behind that may be exposed to a later
85890 + syscall leaking the stack.
85891 +
85892 +config PAX_MEMORY_UDEREF
85893 + bool "Prevent invalid userland pointer dereference"
85894 + depends on X86 && !UML_X86 && !XEN
85895 + select PAX_PER_CPU_PGD if X86_64
85896 + help
85897 + By saying Y here the kernel will be prevented from dereferencing
85898 + userland pointers in contexts where the kernel expects only kernel
85899 + pointers. This is both a useful runtime debugging feature and a
85900 + security measure that prevents exploiting a class of kernel bugs.
85901 +
85902 + The tradeoff is that some virtualization solutions may experience
85903 + a huge slowdown and therefore you should not enable this feature
85904 + for kernels meant to run in such environments. Whether a given VM
85905 + solution is affected or not is best determined by simply trying it
85906 + out, the performance impact will be obvious right on boot as this
85907 + mechanism engages from very early on. A good rule of thumb is that
85908 + VMs running on CPUs without hardware virtualization support (i.e.,
85909 + the majority of IA-32 CPUs) will likely experience the slowdown.
85910 +
85911 +config PAX_REFCOUNT
85912 + bool "Prevent various kernel object reference counter overflows"
85913 + depends on GRKERNSEC && (X86 || SPARC64)
85914 + help
85915 + By saying Y here the kernel will detect and prevent overflowing
85916 + various (but not all) kinds of object reference counters. Such
85917 + overflows can normally occur due to bugs only and are often, if
85918 + not always, exploitable.
85919 +
85920 + The tradeoff is that data structures protected by an overflowed
85921 + refcount will never be freed and therefore will leak memory. Note
85922 + that this leak also happens even without this protection but in
85923 + that case the overflow can eventually trigger the freeing of the
85924 + data structure while it is still being used elsewhere, resulting
85925 + in the exploitable situation that this feature prevents.
85926 +
85927 + Since this has a negligible performance impact, you should enable
85928 + this feature.
85929 +
85930 +config PAX_USERCOPY
85931 + bool "Harden heap object copies between kernel and userland"
85932 + depends on X86 || PPC || SPARC || ARM
85933 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
85934 + help
85935 + By saying Y here the kernel will enforce the size of heap objects
85936 + when they are copied in either direction between the kernel and
85937 + userland, even if only a part of the heap object is copied.
85938 +
85939 + Specifically, this checking prevents information leaking from the
85940 + kernel heap during kernel to userland copies (if the kernel heap
85941 + object is otherwise fully initialized) and prevents kernel heap
85942 + overflows during userland to kernel copies.
85943 +
85944 + Note that the current implementation provides the strictest bounds
85945 + checks for the SLUB allocator.
85946 +
85947 + Enabling this option also enables per-slab cache protection against
85948 + data in a given cache being copied into/out of via userland
85949 + accessors. Though the whitelist of regions will be reduced over
85950 + time, it notably protects important data structures like task structs.
85951 +
85952 +
85953 + If frame pointers are enabled on x86, this option will also
85954 + restrict copies into and out of the kernel stack to local variables
85955 + within a single frame.
85956 +
85957 + Since this has a negligible performance impact, you should enable
85958 + this feature.
85959 +
85960 +endmenu
85961 +
85962 +endmenu
85963 +
85964 config KEYS
85965 bool "Enable access key retention support"
85966 help
85967 @@ -146,7 +766,7 @@ config INTEL_TXT
85968 config LSM_MMAP_MIN_ADDR
85969 int "Low address space for LSM to protect from user allocation"
85970 depends on SECURITY && SECURITY_SELINUX
85971 - default 65536
85972 + default 32768
85973 help
85974 This is the portion of low virtual memory which should be protected
85975 from userspace allocation. Keeping a user from writing to low pages
85976 diff --git a/security/capability.c b/security/capability.c
85977 index fce07a7..5f12858 100644
85978 --- a/security/capability.c
85979 +++ b/security/capability.c
85980 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
85981 }
85982 #endif /* CONFIG_AUDIT */
85983
85984 -struct security_operations default_security_ops = {
85985 +struct security_operations default_security_ops __read_only = {
85986 .name = "default",
85987 };
85988
85989 diff --git a/security/commoncap.c b/security/commoncap.c
85990 index fe30751..aaba312 100644
85991 --- a/security/commoncap.c
85992 +++ b/security/commoncap.c
85993 @@ -27,6 +27,8 @@
85994 #include <linux/sched.h>
85995 #include <linux/prctl.h>
85996 #include <linux/securebits.h>
85997 +#include <linux/syslog.h>
85998 +#include <net/sock.h>
85999
86000 /*
86001 * If a non-root user executes a setuid-root binary in
86002 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
86003 }
86004 }
86005
86006 +#ifdef CONFIG_NET
86007 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
86008 +#endif
86009 +
86010 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
86011 {
86012 +#ifdef CONFIG_NET
86013 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
86014 +#else
86015 NETLINK_CB(skb).eff_cap = current_cap();
86016 +#endif
86017 +
86018 return 0;
86019 }
86020
86021 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
86022 {
86023 const struct cred *cred = current_cred();
86024
86025 + if (gr_acl_enable_at_secure())
86026 + return 1;
86027 +
86028 if (cred->uid != 0) {
86029 if (bprm->cap_effective)
86030 return 1;
86031 @@ -956,13 +970,18 @@ error:
86032 /**
86033 * cap_syslog - Determine whether syslog function is permitted
86034 * @type: Function requested
86035 + * @from_file: Whether this request came from an open file (i.e. /proc)
86036 *
86037 * Determine whether the current process is permitted to use a particular
86038 * syslog function, returning 0 if permission is granted, -ve if not.
86039 */
86040 -int cap_syslog(int type)
86041 +int cap_syslog(int type, bool from_file)
86042 {
86043 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
86044 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
86045 + if (type != SYSLOG_ACTION_OPEN && from_file)
86046 + return 0;
86047 + if ((type != SYSLOG_ACTION_READ_ALL &&
86048 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
86049 return -EPERM;
86050 return 0;
86051 }
86052 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
86053 index 165eb53..b1db4eb 100644
86054 --- a/security/integrity/ima/ima.h
86055 +++ b/security/integrity/ima/ima.h
86056 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
86057 extern spinlock_t ima_queue_lock;
86058
86059 struct ima_h_table {
86060 - atomic_long_t len; /* number of stored measurements in the list */
86061 - atomic_long_t violations;
86062 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
86063 + atomic_long_unchecked_t violations;
86064 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
86065 };
86066 extern struct ima_h_table ima_htable;
86067 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
86068 index 852bf85..35d6df3 100644
86069 --- a/security/integrity/ima/ima_api.c
86070 +++ b/security/integrity/ima/ima_api.c
86071 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
86072 int result;
86073
86074 /* can overflow, only indicator */
86075 - atomic_long_inc(&ima_htable.violations);
86076 + atomic_long_inc_unchecked(&ima_htable.violations);
86077
86078 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
86079 if (!entry) {
86080 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
86081 index 0c72c9c..433e29b 100644
86082 --- a/security/integrity/ima/ima_fs.c
86083 +++ b/security/integrity/ima/ima_fs.c
86084 @@ -27,12 +27,12 @@
86085 static int valid_policy = 1;
86086 #define TMPBUFLEN 12
86087 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
86088 - loff_t *ppos, atomic_long_t *val)
86089 + loff_t *ppos, atomic_long_unchecked_t *val)
86090 {
86091 char tmpbuf[TMPBUFLEN];
86092 ssize_t len;
86093
86094 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
86095 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
86096 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
86097 }
86098
86099 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
86100 index e19316d..339f7ae 100644
86101 --- a/security/integrity/ima/ima_queue.c
86102 +++ b/security/integrity/ima/ima_queue.c
86103 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
86104 INIT_LIST_HEAD(&qe->later);
86105 list_add_tail_rcu(&qe->later, &ima_measurements);
86106
86107 - atomic_long_inc(&ima_htable.len);
86108 + atomic_long_inc_unchecked(&ima_htable.len);
86109 key = ima_hash_key(entry->digest);
86110 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
86111 return 0;
86112 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
86113 index e031952..c9a535d 100644
86114 --- a/security/keys/keyring.c
86115 +++ b/security/keys/keyring.c
86116 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
86117 ret = -EFAULT;
86118
86119 for (loop = 0; loop < klist->nkeys; loop++) {
86120 + key_serial_t serial;
86121 key = klist->keys[loop];
86122 + serial = key->serial;
86123
86124 tmp = sizeof(key_serial_t);
86125 if (tmp > buflen)
86126 tmp = buflen;
86127
86128 - if (copy_to_user(buffer,
86129 - &key->serial,
86130 - tmp) != 0)
86131 + if (copy_to_user(buffer, &serial, tmp))
86132 goto error;
86133
86134 buflen -= tmp;
86135 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
86136 index 931cfda..e71808a 100644
86137 --- a/security/keys/process_keys.c
86138 +++ b/security/keys/process_keys.c
86139 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
86140 ret = install_process_keyring_to_cred(new);
86141 if (ret < 0) {
86142 abort_creds(new);
86143 - return ret != -EEXIST ?: 0;
86144 + return ret != -EEXIST ? ret : 0;
86145 }
86146
86147 return commit_creds(new);
86148 diff --git a/security/min_addr.c b/security/min_addr.c
86149 index d9f9425..c28cef4 100644
86150 --- a/security/min_addr.c
86151 +++ b/security/min_addr.c
86152 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
86153 */
86154 static void update_mmap_min_addr(void)
86155 {
86156 +#ifndef SPARC
86157 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
86158 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
86159 mmap_min_addr = dac_mmap_min_addr;
86160 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
86161 #else
86162 mmap_min_addr = dac_mmap_min_addr;
86163 #endif
86164 +#endif
86165 }
86166
86167 /*
86168 diff --git a/security/root_plug.c b/security/root_plug.c
86169 index 2f7ffa6..0455400 100644
86170 --- a/security/root_plug.c
86171 +++ b/security/root_plug.c
86172 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
86173 return 0;
86174 }
86175
86176 -static struct security_operations rootplug_security_ops = {
86177 +static struct security_operations rootplug_security_ops __read_only = {
86178 .bprm_check_security = rootplug_bprm_check_security,
86179 };
86180
86181 diff --git a/security/security.c b/security/security.c
86182 index c4c6732..7abf13b 100644
86183 --- a/security/security.c
86184 +++ b/security/security.c
86185 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
86186 extern struct security_operations default_security_ops;
86187 extern void security_fixup_ops(struct security_operations *ops);
86188
86189 -struct security_operations *security_ops; /* Initialized to NULL */
86190 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
86191
86192 static inline int verify(struct security_operations *ops)
86193 {
86194 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
86195 * If there is already a security module registered with the kernel,
86196 * an error will be returned. Otherwise %0 is returned on success.
86197 */
86198 -int register_security(struct security_operations *ops)
86199 +int __init register_security(struct security_operations *ops)
86200 {
86201 if (verify(ops)) {
86202 printk(KERN_DEBUG "%s could not verify "
86203 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
86204 return security_ops->quota_on(dentry);
86205 }
86206
86207 -int security_syslog(int type)
86208 +int security_syslog(int type, bool from_file)
86209 {
86210 - return security_ops->syslog(type);
86211 + return security_ops->syslog(type, from_file);
86212 }
86213
86214 int security_settime(struct timespec *ts, struct timezone *tz)
86215 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
86216 index a106754..ca3a589 100644
86217 --- a/security/selinux/hooks.c
86218 +++ b/security/selinux/hooks.c
86219 @@ -76,6 +76,7 @@
86220 #include <linux/selinux.h>
86221 #include <linux/mutex.h>
86222 #include <linux/posix-timers.h>
86223 +#include <linux/syslog.h>
86224
86225 #include "avc.h"
86226 #include "objsec.h"
86227 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
86228 * Minimal support for a secondary security module,
86229 * just to allow the use of the capability module.
86230 */
86231 -static struct security_operations *secondary_ops;
86232 +static struct security_operations *secondary_ops __read_only;
86233
86234 /* Lists of inode and superblock security structures initialized
86235 before the policy was loaded. */
86236 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
86237 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
86238 }
86239
86240 -static int selinux_syslog(int type)
86241 +static int selinux_syslog(int type, bool from_file)
86242 {
86243 int rc;
86244
86245 - rc = cap_syslog(type);
86246 + rc = cap_syslog(type, from_file);
86247 if (rc)
86248 return rc;
86249
86250 switch (type) {
86251 - case 3: /* Read last kernel messages */
86252 - case 10: /* Return size of the log buffer */
86253 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
86254 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
86255 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
86256 break;
86257 - case 6: /* Disable logging to console */
86258 - case 7: /* Enable logging to console */
86259 - case 8: /* Set level of messages printed to console */
86260 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
86261 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
86262 + /* Set level of messages printed to console */
86263 + case SYSLOG_ACTION_CONSOLE_LEVEL:
86264 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
86265 break;
86266 - case 0: /* Close log */
86267 - case 1: /* Open log */
86268 - case 2: /* Read from log */
86269 - case 4: /* Read/clear last kernel messages */
86270 - case 5: /* Clear ring buffer */
86271 + case SYSLOG_ACTION_CLOSE: /* Close log */
86272 + case SYSLOG_ACTION_OPEN: /* Open log */
86273 + case SYSLOG_ACTION_READ: /* Read from log */
86274 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
86275 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
86276 default:
86277 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
86278 break;
86279 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
86280
86281 #endif
86282
86283 -static struct security_operations selinux_ops = {
86284 +static struct security_operations selinux_ops __read_only = {
86285 .name = "selinux",
86286
86287 .ptrace_access_check = selinux_ptrace_access_check,
86288 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
86289 avc_disable();
86290
86291 /* Reset security_ops to the secondary module, dummy or capability. */
86292 + pax_open_kernel();
86293 security_ops = secondary_ops;
86294 + pax_close_kernel();
86295
86296 /* Unregister netfilter hooks. */
86297 selinux_nf_ip_exit();
86298 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
86299 index 13128f9..c23c736 100644
86300 --- a/security/selinux/include/xfrm.h
86301 +++ b/security/selinux/include/xfrm.h
86302 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
86303
86304 static inline void selinux_xfrm_notify_policyload(void)
86305 {
86306 - atomic_inc(&flow_cache_genid);
86307 + atomic_inc_unchecked(&flow_cache_genid);
86308 }
86309 #else
86310 static inline int selinux_xfrm_enabled(void)
86311 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
86312 index ff17820..d68084c 100644
86313 --- a/security/selinux/ss/services.c
86314 +++ b/security/selinux/ss/services.c
86315 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
86316 int rc = 0;
86317 struct policy_file file = { data, len }, *fp = &file;
86318
86319 + pax_track_stack();
86320 +
86321 if (!ss_initialized) {
86322 avtab_cache_init();
86323 if (policydb_read(&policydb, fp)) {
86324 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
86325 index c33b6bb..b51f19e 100644
86326 --- a/security/smack/smack_lsm.c
86327 +++ b/security/smack/smack_lsm.c
86328 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
86329 *
86330 * Returns 0 on success, error code otherwise.
86331 */
86332 -static int smack_syslog(int type)
86333 +static int smack_syslog(int type, bool from_file)
86334 {
86335 int rc;
86336 char *sp = current_security();
86337
86338 - rc = cap_syslog(type);
86339 + rc = cap_syslog(type, from_file);
86340 if (rc != 0)
86341 return rc;
86342
86343 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
86344 return 0;
86345 }
86346
86347 -struct security_operations smack_ops = {
86348 +struct security_operations smack_ops __read_only = {
86349 .name = "smack",
86350
86351 .ptrace_access_check = smack_ptrace_access_check,
86352 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
86353 index 9548a09..9a5f384 100644
86354 --- a/security/tomoyo/tomoyo.c
86355 +++ b/security/tomoyo/tomoyo.c
86356 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
86357 * tomoyo_security_ops is a "struct security_operations" which is used for
86358 * registering TOMOYO.
86359 */
86360 -static struct security_operations tomoyo_security_ops = {
86361 +static struct security_operations tomoyo_security_ops __read_only = {
86362 .name = "tomoyo",
86363 .cred_alloc_blank = tomoyo_cred_alloc_blank,
86364 .cred_prepare = tomoyo_cred_prepare,
86365 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
86366 index 84bb07d..c2ab6b6 100644
86367 --- a/sound/aoa/codecs/onyx.c
86368 +++ b/sound/aoa/codecs/onyx.c
86369 @@ -53,7 +53,7 @@ struct onyx {
86370 spdif_locked:1,
86371 analog_locked:1,
86372 original_mute:2;
86373 - int open_count;
86374 + local_t open_count;
86375 struct codec_info *codec_info;
86376
86377 /* mutex serializes concurrent access to the device
86378 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
86379 struct onyx *onyx = cii->codec_data;
86380
86381 mutex_lock(&onyx->mutex);
86382 - onyx->open_count++;
86383 + local_inc(&onyx->open_count);
86384 mutex_unlock(&onyx->mutex);
86385
86386 return 0;
86387 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
86388 struct onyx *onyx = cii->codec_data;
86389
86390 mutex_lock(&onyx->mutex);
86391 - onyx->open_count--;
86392 - if (!onyx->open_count)
86393 + if (local_dec_and_test(&onyx->open_count))
86394 onyx->spdif_locked = onyx->analog_locked = 0;
86395 mutex_unlock(&onyx->mutex);
86396
86397 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
86398 index ffd2025..df062c9 100644
86399 --- a/sound/aoa/codecs/onyx.h
86400 +++ b/sound/aoa/codecs/onyx.h
86401 @@ -11,6 +11,7 @@
86402 #include <linux/i2c.h>
86403 #include <asm/pmac_low_i2c.h>
86404 #include <asm/prom.h>
86405 +#include <asm/local.h>
86406
86407 /* PCM3052 register definitions */
86408
86409 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
86410 index d9c9635..bc0a5a2 100644
86411 --- a/sound/core/oss/pcm_oss.c
86412 +++ b/sound/core/oss/pcm_oss.c
86413 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
86414 }
86415 } else {
86416 tmp = snd_pcm_oss_write2(substream,
86417 - (const char __force *)buf,
86418 + (const char __force_kernel *)buf,
86419 runtime->oss.period_bytes, 0);
86420 if (tmp <= 0)
86421 goto err;
86422 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
86423 xfer += tmp;
86424 runtime->oss.buffer_used -= tmp;
86425 } else {
86426 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
86427 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
86428 runtime->oss.period_bytes, 0);
86429 if (tmp <= 0)
86430 goto err;
86431 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
86432 index 038232d..7dd9e5c 100644
86433 --- a/sound/core/pcm_compat.c
86434 +++ b/sound/core/pcm_compat.c
86435 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
86436 int err;
86437
86438 fs = snd_enter_user();
86439 - err = snd_pcm_delay(substream, &delay);
86440 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
86441 snd_leave_user(fs);
86442 if (err < 0)
86443 return err;
86444 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
86445 index e6d2d97..4843949 100644
86446 --- a/sound/core/pcm_native.c
86447 +++ b/sound/core/pcm_native.c
86448 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
86449 switch (substream->stream) {
86450 case SNDRV_PCM_STREAM_PLAYBACK:
86451 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
86452 - (void __user *)arg);
86453 + (void __force_user *)arg);
86454 break;
86455 case SNDRV_PCM_STREAM_CAPTURE:
86456 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
86457 - (void __user *)arg);
86458 + (void __force_user *)arg);
86459 break;
86460 default:
86461 result = -EINVAL;
86462 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
86463 index 1f99767..14636533 100644
86464 --- a/sound/core/seq/seq_device.c
86465 +++ b/sound/core/seq/seq_device.c
86466 @@ -63,7 +63,7 @@ struct ops_list {
86467 int argsize; /* argument size */
86468
86469 /* operators */
86470 - struct snd_seq_dev_ops ops;
86471 + struct snd_seq_dev_ops *ops;
86472
86473 /* registred devices */
86474 struct list_head dev_list; /* list of devices */
86475 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
86476
86477 mutex_lock(&ops->reg_mutex);
86478 /* copy driver operators */
86479 - ops->ops = *entry;
86480 + ops->ops = entry;
86481 ops->driver |= DRIVER_LOADED;
86482 ops->argsize = argsize;
86483
86484 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
86485 dev->name, ops->id, ops->argsize, dev->argsize);
86486 return -EINVAL;
86487 }
86488 - if (ops->ops.init_device(dev) >= 0) {
86489 + if (ops->ops->init_device(dev) >= 0) {
86490 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
86491 ops->num_init_devices++;
86492 } else {
86493 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
86494 dev->name, ops->id, ops->argsize, dev->argsize);
86495 return -EINVAL;
86496 }
86497 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
86498 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
86499 dev->status = SNDRV_SEQ_DEVICE_FREE;
86500 dev->driver_data = NULL;
86501 ops->num_init_devices--;
86502 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
86503 index 9284829..ac8e8b2 100644
86504 --- a/sound/drivers/mts64.c
86505 +++ b/sound/drivers/mts64.c
86506 @@ -27,6 +27,7 @@
86507 #include <sound/initval.h>
86508 #include <sound/rawmidi.h>
86509 #include <sound/control.h>
86510 +#include <asm/local.h>
86511
86512 #define CARD_NAME "Miditerminal 4140"
86513 #define DRIVER_NAME "MTS64"
86514 @@ -65,7 +66,7 @@ struct mts64 {
86515 struct pardevice *pardev;
86516 int pardev_claimed;
86517
86518 - int open_count;
86519 + local_t open_count;
86520 int current_midi_output_port;
86521 int current_midi_input_port;
86522 u8 mode[MTS64_NUM_INPUT_PORTS];
86523 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86524 {
86525 struct mts64 *mts = substream->rmidi->private_data;
86526
86527 - if (mts->open_count == 0) {
86528 + if (local_read(&mts->open_count) == 0) {
86529 /* We don't need a spinlock here, because this is just called
86530 if the device has not been opened before.
86531 So there aren't any IRQs from the device */
86532 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86533
86534 msleep(50);
86535 }
86536 - ++(mts->open_count);
86537 + local_inc(&mts->open_count);
86538
86539 return 0;
86540 }
86541 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86542 struct mts64 *mts = substream->rmidi->private_data;
86543 unsigned long flags;
86544
86545 - --(mts->open_count);
86546 - if (mts->open_count == 0) {
86547 + if (local_dec_return(&mts->open_count) == 0) {
86548 /* We need the spinlock_irqsave here because we can still
86549 have IRQs at this point */
86550 spin_lock_irqsave(&mts->lock, flags);
86551 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86552
86553 msleep(500);
86554
86555 - } else if (mts->open_count < 0)
86556 - mts->open_count = 0;
86557 + } else if (local_read(&mts->open_count) < 0)
86558 + local_set(&mts->open_count, 0);
86559
86560 return 0;
86561 }
86562 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
86563 index 01997f2..cbc1195 100644
86564 --- a/sound/drivers/opl4/opl4_lib.c
86565 +++ b/sound/drivers/opl4/opl4_lib.c
86566 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
86567 MODULE_DESCRIPTION("OPL4 driver");
86568 MODULE_LICENSE("GPL");
86569
86570 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
86571 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
86572 {
86573 int timeout = 10;
86574 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
86575 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
86576 index 60158e2..0a0cc1a 100644
86577 --- a/sound/drivers/portman2x4.c
86578 +++ b/sound/drivers/portman2x4.c
86579 @@ -46,6 +46,7 @@
86580 #include <sound/initval.h>
86581 #include <sound/rawmidi.h>
86582 #include <sound/control.h>
86583 +#include <asm/local.h>
86584
86585 #define CARD_NAME "Portman 2x4"
86586 #define DRIVER_NAME "portman"
86587 @@ -83,7 +84,7 @@ struct portman {
86588 struct pardevice *pardev;
86589 int pardev_claimed;
86590
86591 - int open_count;
86592 + local_t open_count;
86593 int mode[PORTMAN_NUM_INPUT_PORTS];
86594 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
86595 };
86596 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
86597 index 02f79d2..8691d43 100644
86598 --- a/sound/isa/cmi8330.c
86599 +++ b/sound/isa/cmi8330.c
86600 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
86601
86602 struct snd_pcm *pcm;
86603 struct snd_cmi8330_stream {
86604 - struct snd_pcm_ops ops;
86605 + snd_pcm_ops_no_const ops;
86606 snd_pcm_open_callback_t open;
86607 void *private_data; /* sb or wss */
86608 } streams[2];
86609 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
86610 index 733b014..56ce96f 100644
86611 --- a/sound/oss/sb_audio.c
86612 +++ b/sound/oss/sb_audio.c
86613 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
86614 buf16 = (signed short *)(localbuf + localoffs);
86615 while (c)
86616 {
86617 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86618 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86619 if (copy_from_user(lbuf8,
86620 userbuf+useroffs + p,
86621 locallen))
86622 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
86623 index 3136c88..28ad950 100644
86624 --- a/sound/oss/swarm_cs4297a.c
86625 +++ b/sound/oss/swarm_cs4297a.c
86626 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
86627 {
86628 struct cs4297a_state *s;
86629 u32 pwr, id;
86630 - mm_segment_t fs;
86631 int rval;
86632 #ifndef CONFIG_BCM_CS4297A_CSWARM
86633 u64 cfg;
86634 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
86635 if (!rval) {
86636 char *sb1250_duart_present;
86637
86638 +#if 0
86639 + mm_segment_t fs;
86640 fs = get_fs();
86641 set_fs(KERNEL_DS);
86642 -#if 0
86643 val = SOUND_MASK_LINE;
86644 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
86645 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
86646 val = initvol[i].vol;
86647 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
86648 }
86649 + set_fs(fs);
86650 // cs4297a_write_ac97(s, 0x18, 0x0808);
86651 #else
86652 // cs4297a_write_ac97(s, 0x5e, 0x180);
86653 cs4297a_write_ac97(s, 0x02, 0x0808);
86654 cs4297a_write_ac97(s, 0x18, 0x0808);
86655 #endif
86656 - set_fs(fs);
86657
86658 list_add(&s->list, &cs4297a_devs);
86659
86660 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
86661 index 78288db..0406809 100644
86662 --- a/sound/pci/ac97/ac97_codec.c
86663 +++ b/sound/pci/ac97/ac97_codec.c
86664 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
86665 }
86666
86667 /* build_ops to do nothing */
86668 -static struct snd_ac97_build_ops null_build_ops;
86669 +static const struct snd_ac97_build_ops null_build_ops;
86670
86671 #ifdef CONFIG_SND_AC97_POWER_SAVE
86672 static void do_update_power(struct work_struct *work)
86673 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
86674 index eeb2e23..82bf625 100644
86675 --- a/sound/pci/ac97/ac97_patch.c
86676 +++ b/sound/pci/ac97/ac97_patch.c
86677 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
86678 return 0;
86679 }
86680
86681 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86682 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86683 .build_spdif = patch_yamaha_ymf743_build_spdif,
86684 .build_3d = patch_yamaha_ymf7x3_3d,
86685 };
86686 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
86687 return 0;
86688 }
86689
86690 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86691 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86692 .build_3d = patch_yamaha_ymf7x3_3d,
86693 .build_post_spdif = patch_yamaha_ymf753_post_spdif
86694 };
86695 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
86696 return 0;
86697 }
86698
86699 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86700 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86701 .build_specific = patch_wolfson_wm9703_specific,
86702 };
86703
86704 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
86705 return 0;
86706 }
86707
86708 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86709 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86710 .build_specific = patch_wolfson_wm9704_specific,
86711 };
86712
86713 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
86714 return 0;
86715 }
86716
86717 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86718 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86719 .build_specific = patch_wolfson_wm9705_specific,
86720 };
86721
86722 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
86723 return 0;
86724 }
86725
86726 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86727 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86728 .build_specific = patch_wolfson_wm9711_specific,
86729 };
86730
86731 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
86732 }
86733 #endif
86734
86735 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86736 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86737 .build_specific = patch_wolfson_wm9713_specific,
86738 .build_3d = patch_wolfson_wm9713_3d,
86739 #ifdef CONFIG_PM
86740 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
86741 return 0;
86742 }
86743
86744 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86745 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86746 .build_3d = patch_sigmatel_stac9700_3d,
86747 .build_specific = patch_sigmatel_stac97xx_specific
86748 };
86749 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
86750 return patch_sigmatel_stac97xx_specific(ac97);
86751 }
86752
86753 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86754 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86755 .build_3d = patch_sigmatel_stac9708_3d,
86756 .build_specific = patch_sigmatel_stac9708_specific
86757 };
86758 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
86759 return 0;
86760 }
86761
86762 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86763 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86764 .build_3d = patch_sigmatel_stac9700_3d,
86765 .build_specific = patch_sigmatel_stac9758_specific
86766 };
86767 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
86768 return 0;
86769 }
86770
86771 -static struct snd_ac97_build_ops patch_cirrus_ops = {
86772 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
86773 .build_spdif = patch_cirrus_build_spdif
86774 };
86775
86776 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
86777 return 0;
86778 }
86779
86780 -static struct snd_ac97_build_ops patch_conexant_ops = {
86781 +static const struct snd_ac97_build_ops patch_conexant_ops = {
86782 .build_spdif = patch_conexant_build_spdif
86783 };
86784
86785 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
86786 }
86787 }
86788
86789 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
86790 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
86791 #ifdef CONFIG_PM
86792 .resume = ad18xx_resume
86793 #endif
86794 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
86795 return 0;
86796 }
86797
86798 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
86799 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
86800 .build_specific = &patch_ad1885_specific,
86801 #ifdef CONFIG_PM
86802 .resume = ad18xx_resume
86803 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
86804 return 0;
86805 }
86806
86807 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
86808 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
86809 .build_specific = &patch_ad1886_specific,
86810 #ifdef CONFIG_PM
86811 .resume = ad18xx_resume
86812 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
86813 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86814 }
86815
86816 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86817 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86818 .build_post_spdif = patch_ad198x_post_spdif,
86819 .build_specific = patch_ad1981a_specific,
86820 #ifdef CONFIG_PM
86821 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
86822 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86823 }
86824
86825 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86826 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86827 .build_post_spdif = patch_ad198x_post_spdif,
86828 .build_specific = patch_ad1981b_specific,
86829 #ifdef CONFIG_PM
86830 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
86831 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
86832 }
86833
86834 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
86835 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
86836 .build_post_spdif = patch_ad198x_post_spdif,
86837 .build_specific = patch_ad1888_specific,
86838 #ifdef CONFIG_PM
86839 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
86840 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
86841 }
86842
86843 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
86844 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
86845 .build_post_spdif = patch_ad198x_post_spdif,
86846 .build_specific = patch_ad1980_specific,
86847 #ifdef CONFIG_PM
86848 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
86849 ARRAY_SIZE(snd_ac97_ad1985_controls));
86850 }
86851
86852 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
86853 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
86854 .build_post_spdif = patch_ad198x_post_spdif,
86855 .build_specific = patch_ad1985_specific,
86856 #ifdef CONFIG_PM
86857 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
86858 ARRAY_SIZE(snd_ac97_ad1985_controls));
86859 }
86860
86861 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
86862 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
86863 .build_post_spdif = patch_ad198x_post_spdif,
86864 .build_specific = patch_ad1986_specific,
86865 #ifdef CONFIG_PM
86866 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
86867 return 0;
86868 }
86869
86870 -static struct snd_ac97_build_ops patch_alc650_ops = {
86871 +static const struct snd_ac97_build_ops patch_alc650_ops = {
86872 .build_specific = patch_alc650_specific,
86873 .update_jacks = alc650_update_jacks
86874 };
86875 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
86876 return 0;
86877 }
86878
86879 -static struct snd_ac97_build_ops patch_alc655_ops = {
86880 +static const struct snd_ac97_build_ops patch_alc655_ops = {
86881 .build_specific = patch_alc655_specific,
86882 .update_jacks = alc655_update_jacks
86883 };
86884 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
86885 return 0;
86886 }
86887
86888 -static struct snd_ac97_build_ops patch_alc850_ops = {
86889 +static const struct snd_ac97_build_ops patch_alc850_ops = {
86890 .build_specific = patch_alc850_specific,
86891 .update_jacks = alc850_update_jacks
86892 };
86893 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
86894 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
86895 }
86896
86897 -static struct snd_ac97_build_ops patch_cm9738_ops = {
86898 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
86899 .build_specific = patch_cm9738_specific,
86900 .update_jacks = cm9738_update_jacks
86901 };
86902 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
86903 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
86904 }
86905
86906 -static struct snd_ac97_build_ops patch_cm9739_ops = {
86907 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
86908 .build_specific = patch_cm9739_specific,
86909 .build_post_spdif = patch_cm9739_post_spdif,
86910 .update_jacks = cm9739_update_jacks
86911 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
86912 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
86913 }
86914
86915 -static struct snd_ac97_build_ops patch_cm9761_ops = {
86916 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
86917 .build_specific = patch_cm9761_specific,
86918 .build_post_spdif = patch_cm9761_post_spdif,
86919 .update_jacks = cm9761_update_jacks
86920 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
86921 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
86922 }
86923
86924 -static struct snd_ac97_build_ops patch_cm9780_ops = {
86925 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
86926 .build_specific = patch_cm9780_specific,
86927 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
86928 };
86929 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
86930 return 0;
86931 }
86932
86933 -static struct snd_ac97_build_ops patch_vt1616_ops = {
86934 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
86935 .build_specific = patch_vt1616_specific
86936 };
86937
86938 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
86939 return 0;
86940 }
86941
86942 -static struct snd_ac97_build_ops patch_it2646_ops = {
86943 +static const struct snd_ac97_build_ops patch_it2646_ops = {
86944 .build_specific = patch_it2646_specific,
86945 .update_jacks = it2646_update_jacks
86946 };
86947 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
86948 return 0;
86949 }
86950
86951 -static struct snd_ac97_build_ops patch_si3036_ops = {
86952 +static const struct snd_ac97_build_ops patch_si3036_ops = {
86953 .build_specific = patch_si3036_specific,
86954 };
86955
86956 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
86957 return 0;
86958 }
86959
86960 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
86961 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
86962 .build_specific = patch_ucb1400_specific,
86963 };
86964
86965 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
86966 index 99552fb..4dcc2c5 100644
86967 --- a/sound/pci/hda/hda_codec.h
86968 +++ b/sound/pci/hda/hda_codec.h
86969 @@ -580,7 +580,7 @@ struct hda_bus_ops {
86970 /* notify power-up/down from codec to controller */
86971 void (*pm_notify)(struct hda_bus *bus);
86972 #endif
86973 -};
86974 +} __no_const;
86975
86976 /* template to pass to the bus constructor */
86977 struct hda_bus_template {
86978 @@ -675,6 +675,7 @@ struct hda_codec_ops {
86979 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
86980 #endif
86981 };
86982 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
86983
86984 /* record for amp information cache */
86985 struct hda_cache_head {
86986 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
86987 struct snd_pcm_substream *substream);
86988 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
86989 struct snd_pcm_substream *substream);
86990 -};
86991 +} __no_const;
86992
86993 /* PCM information for each substream */
86994 struct hda_pcm_stream {
86995 @@ -760,7 +761,7 @@ struct hda_codec {
86996 const char *modelname; /* model name for preset */
86997
86998 /* set by patch */
86999 - struct hda_codec_ops patch_ops;
87000 + hda_codec_ops_no_const patch_ops;
87001
87002 /* PCM to create, set by patch_ops.build_pcms callback */
87003 unsigned int num_pcms;
87004 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
87005 index fb684f0..2b11cea 100644
87006 --- a/sound/pci/hda/patch_atihdmi.c
87007 +++ b/sound/pci/hda/patch_atihdmi.c
87008 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
87009 */
87010 spec->multiout.dig_out_nid = CVT_NID;
87011
87012 - codec->patch_ops = atihdmi_patch_ops;
87013 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
87014
87015 return 0;
87016 }
87017 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
87018 index 7c23016..c5bfdd7 100644
87019 --- a/sound/pci/hda/patch_intelhdmi.c
87020 +++ b/sound/pci/hda/patch_intelhdmi.c
87021 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
87022 cp_ready);
87023
87024 /* TODO */
87025 - if (cp_state)
87026 - ;
87027 - if (cp_ready)
87028 - ;
87029 + if (cp_state) {
87030 + }
87031 + if (cp_ready) {
87032 + }
87033 }
87034
87035
87036 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
87037 spec->multiout.dig_out_nid = cvt_nid;
87038
87039 codec->spec = spec;
87040 - codec->patch_ops = intel_hdmi_patch_ops;
87041 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
87042
87043 snd_hda_eld_proc_new(codec, &spec->sink_eld);
87044
87045 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
87046 index 6afdab0..68ed352 100644
87047 --- a/sound/pci/hda/patch_nvhdmi.c
87048 +++ b/sound/pci/hda/patch_nvhdmi.c
87049 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
87050 spec->multiout.max_channels = 8;
87051 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
87052
87053 - codec->patch_ops = nvhdmi_patch_ops_8ch;
87054 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
87055
87056 return 0;
87057 }
87058 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
87059 spec->multiout.max_channels = 2;
87060 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
87061
87062 - codec->patch_ops = nvhdmi_patch_ops_2ch;
87063 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
87064
87065 return 0;
87066 }
87067 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
87068 index 2fcd70d..a143eaf 100644
87069 --- a/sound/pci/hda/patch_sigmatel.c
87070 +++ b/sound/pci/hda/patch_sigmatel.c
87071 @@ -5220,7 +5220,7 @@ again:
87072 snd_hda_codec_write_cache(codec, nid, 0,
87073 AC_VERB_SET_CONNECT_SEL, num_dacs);
87074
87075 - codec->patch_ops = stac92xx_patch_ops;
87076 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
87077
87078 codec->proc_widget_hook = stac92hd_proc_hook;
87079
87080 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
87081 return -ENOMEM;
87082
87083 codec->spec = spec;
87084 - codec->patch_ops = stac92xx_patch_ops;
87085 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
87086 spec->num_pins = STAC92HD71BXX_NUM_PINS;
87087 switch (codec->vendor_id) {
87088 case 0x111d76b6:
87089 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
87090 index d063149..01599a4 100644
87091 --- a/sound/pci/ice1712/ice1712.h
87092 +++ b/sound/pci/ice1712/ice1712.h
87093 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
87094 unsigned int mask_flags; /* total mask bits */
87095 struct snd_akm4xxx_ops {
87096 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
87097 - } ops;
87098 + } __no_const ops;
87099 };
87100
87101 struct snd_ice1712_spdif {
87102 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
87103 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87104 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87105 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87106 - } ops;
87107 + } __no_const ops;
87108 };
87109
87110
87111 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
87112 index 9e7d12e..3e3bc64 100644
87113 --- a/sound/pci/intel8x0m.c
87114 +++ b/sound/pci/intel8x0m.c
87115 @@ -1264,7 +1264,7 @@ static struct shortname_table {
87116 { 0x5455, "ALi M5455" },
87117 { 0x746d, "AMD AMD8111" },
87118 #endif
87119 - { 0 },
87120 + { 0, },
87121 };
87122
87123 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
87124 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
87125 index 5518371..45cf7ac 100644
87126 --- a/sound/pci/ymfpci/ymfpci_main.c
87127 +++ b/sound/pci/ymfpci/ymfpci_main.c
87128 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
87129 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
87130 break;
87131 }
87132 - if (atomic_read(&chip->interrupt_sleep_count)) {
87133 - atomic_set(&chip->interrupt_sleep_count, 0);
87134 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
87135 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87136 wake_up(&chip->interrupt_sleep);
87137 }
87138 __end:
87139 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
87140 continue;
87141 init_waitqueue_entry(&wait, current);
87142 add_wait_queue(&chip->interrupt_sleep, &wait);
87143 - atomic_inc(&chip->interrupt_sleep_count);
87144 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
87145 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
87146 remove_wait_queue(&chip->interrupt_sleep, &wait);
87147 }
87148 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
87149 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
87150 spin_unlock(&chip->reg_lock);
87151
87152 - if (atomic_read(&chip->interrupt_sleep_count)) {
87153 - atomic_set(&chip->interrupt_sleep_count, 0);
87154 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
87155 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87156 wake_up(&chip->interrupt_sleep);
87157 }
87158 }
87159 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
87160 spin_lock_init(&chip->reg_lock);
87161 spin_lock_init(&chip->voice_lock);
87162 init_waitqueue_head(&chip->interrupt_sleep);
87163 - atomic_set(&chip->interrupt_sleep_count, 0);
87164 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87165 chip->card = card;
87166 chip->pci = pci;
87167 chip->irq = -1;
87168 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
87169 index 0a1b2f6..776bb19 100644
87170 --- a/sound/soc/soc-core.c
87171 +++ b/sound/soc/soc-core.c
87172 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
87173 }
87174
87175 /* ASoC PCM operations */
87176 -static struct snd_pcm_ops soc_pcm_ops = {
87177 +static snd_pcm_ops_no_const soc_pcm_ops = {
87178 .open = soc_pcm_open,
87179 .close = soc_codec_close,
87180 .hw_params = soc_pcm_hw_params,
87181 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
87182 index 79633ea..9732e90 100644
87183 --- a/sound/usb/usbaudio.c
87184 +++ b/sound/usb/usbaudio.c
87185 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
87186 switch (cmd) {
87187 case SNDRV_PCM_TRIGGER_START:
87188 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
87189 - subs->ops.prepare = prepare_playback_urb;
87190 + *(void **)&subs->ops.prepare = prepare_playback_urb;
87191 return 0;
87192 case SNDRV_PCM_TRIGGER_STOP:
87193 return deactivate_urbs(subs, 0, 0);
87194 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
87195 - subs->ops.prepare = prepare_nodata_playback_urb;
87196 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
87197 return 0;
87198 default:
87199 return -EINVAL;
87200 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
87201
87202 switch (cmd) {
87203 case SNDRV_PCM_TRIGGER_START:
87204 - subs->ops.retire = retire_capture_urb;
87205 + *(void **)&subs->ops.retire = retire_capture_urb;
87206 return start_urbs(subs, substream->runtime);
87207 case SNDRV_PCM_TRIGGER_STOP:
87208 return deactivate_urbs(subs, 0, 0);
87209 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
87210 - subs->ops.retire = retire_paused_capture_urb;
87211 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
87212 return 0;
87213 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
87214 - subs->ops.retire = retire_capture_urb;
87215 + *(void **)&subs->ops.retire = retire_capture_urb;
87216 return 0;
87217 default:
87218 return -EINVAL;
87219 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
87220 /* for playback, submit the URBs now; otherwise, the first hwptr_done
87221 * updates for all URBs would happen at the same time when starting */
87222 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
87223 - subs->ops.prepare = prepare_nodata_playback_urb;
87224 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
87225 return start_urbs(subs, runtime);
87226 } else
87227 return 0;
87228 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
87229 subs->direction = stream;
87230 subs->dev = as->chip->dev;
87231 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
87232 - subs->ops = audio_urb_ops[stream];
87233 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
87234 } else {
87235 - subs->ops = audio_urb_ops_high_speed[stream];
87236 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
87237 switch (as->chip->usb_id) {
87238 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
87239 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
87240 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
87241 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
87242 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
87243 break;
87244 }
87245 }
87246 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
87247 new file mode 100644
87248 index 0000000..469b06a
87249 --- /dev/null
87250 +++ b/tools/gcc/Makefile
87251 @@ -0,0 +1,21 @@
87252 +#CC := gcc
87253 +#PLUGIN_SOURCE_FILES := pax_plugin.c
87254 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
87255 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
87256 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
87257 +
87258 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
87259 +
87260 +hostlibs-y := constify_plugin.so
87261 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
87262 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
87263 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
87264 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
87265 +
87266 +always := $(hostlibs-y)
87267 +
87268 +constify_plugin-objs := constify_plugin.o
87269 +stackleak_plugin-objs := stackleak_plugin.o
87270 +kallocstat_plugin-objs := kallocstat_plugin.o
87271 +kernexec_plugin-objs := kernexec_plugin.o
87272 +checker_plugin-objs := checker_plugin.o
87273 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
87274 new file mode 100644
87275 index 0000000..d41b5af
87276 --- /dev/null
87277 +++ b/tools/gcc/checker_plugin.c
87278 @@ -0,0 +1,171 @@
87279 +/*
87280 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87281 + * Licensed under the GPL v2
87282 + *
87283 + * Note: the choice of the license means that the compilation process is
87284 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87285 + * but for the kernel it doesn't matter since it doesn't link against
87286 + * any of the gcc libraries
87287 + *
87288 + * gcc plugin to implement various sparse (source code checker) features
87289 + *
87290 + * TODO:
87291 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
87292 + *
87293 + * BUGS:
87294 + * - none known
87295 + */
87296 +#include "gcc-plugin.h"
87297 +#include "config.h"
87298 +#include "system.h"
87299 +#include "coretypes.h"
87300 +#include "tree.h"
87301 +#include "tree-pass.h"
87302 +#include "flags.h"
87303 +#include "intl.h"
87304 +#include "toplev.h"
87305 +#include "plugin.h"
87306 +//#include "expr.h" where are you...
87307 +#include "diagnostic.h"
87308 +#include "plugin-version.h"
87309 +#include "tm.h"
87310 +#include "function.h"
87311 +#include "basic-block.h"
87312 +#include "gimple.h"
87313 +#include "rtl.h"
87314 +#include "emit-rtl.h"
87315 +#include "tree-flow.h"
87316 +#include "target.h"
87317 +
87318 +extern void c_register_addr_space (const char *str, addr_space_t as);
87319 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
87320 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
87321 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
87322 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
87323 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
87324 +
87325 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87326 +extern rtx emit_move_insn(rtx x, rtx y);
87327 +
87328 +int plugin_is_GPL_compatible;
87329 +
87330 +static struct plugin_info checker_plugin_info = {
87331 + .version = "201111150100",
87332 +};
87333 +
87334 +#define ADDR_SPACE_KERNEL 0
87335 +#define ADDR_SPACE_FORCE_KERNEL 1
87336 +#define ADDR_SPACE_USER 2
87337 +#define ADDR_SPACE_FORCE_USER 3
87338 +#define ADDR_SPACE_IOMEM 0
87339 +#define ADDR_SPACE_FORCE_IOMEM 0
87340 +#define ADDR_SPACE_PERCPU 0
87341 +#define ADDR_SPACE_FORCE_PERCPU 0
87342 +#define ADDR_SPACE_RCU 0
87343 +#define ADDR_SPACE_FORCE_RCU 0
87344 +
87345 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
87346 +{
87347 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
87348 +}
87349 +
87350 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
87351 +{
87352 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
87353 +}
87354 +
87355 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
87356 +{
87357 + return default_addr_space_valid_pointer_mode(mode, as);
87358 +}
87359 +
87360 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
87361 +{
87362 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
87363 +}
87364 +
87365 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
87366 +{
87367 + return default_addr_space_legitimize_address(x, oldx, mode, as);
87368 +}
87369 +
87370 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
87371 +{
87372 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
87373 + return true;
87374 +
87375 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
87376 + return true;
87377 +
87378 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
87379 + return true;
87380 +
87381 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
87382 + return true;
87383 +
87384 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
87385 + return true;
87386 +
87387 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
87388 + return true;
87389 +
87390 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
87391 + return true;
87392 +
87393 + return subset == superset;
87394 +}
87395 +
87396 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
87397 +{
87398 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
87399 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
87400 +
87401 + return op;
87402 +}
87403 +
87404 +static void register_checker_address_spaces(void *event_data, void *data)
87405 +{
87406 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
87407 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
87408 + c_register_addr_space("__user", ADDR_SPACE_USER);
87409 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
87410 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
87411 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
87412 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
87413 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
87414 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
87415 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
87416 +
87417 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
87418 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
87419 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
87420 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
87421 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
87422 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
87423 + targetm.addr_space.convert = checker_addr_space_convert;
87424 +}
87425 +
87426 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87427 +{
87428 + const char * const plugin_name = plugin_info->base_name;
87429 + const int argc = plugin_info->argc;
87430 + const struct plugin_argument * const argv = plugin_info->argv;
87431 + int i;
87432 +
87433 + if (!plugin_default_version_check(version, &gcc_version)) {
87434 + error(G_("incompatible gcc/plugin versions"));
87435 + return 1;
87436 + }
87437 +
87438 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
87439 +
87440 + for (i = 0; i < argc; ++i)
87441 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87442 +
87443 + if (TARGET_64BIT == 0)
87444 + return 0;
87445 +
87446 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
87447 +
87448 + return 0;
87449 +}
87450 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
87451 new file mode 100644
87452 index 0000000..704a564
87453 --- /dev/null
87454 +++ b/tools/gcc/constify_plugin.c
87455 @@ -0,0 +1,303 @@
87456 +/*
87457 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
87458 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
87459 + * Licensed under the GPL v2, or (at your option) v3
87460 + *
87461 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
87462 + *
87463 + * Homepage:
87464 + * http://www.grsecurity.net/~ephox/const_plugin/
87465 + *
87466 + * Usage:
87467 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
87468 + * $ gcc -fplugin=constify_plugin.so test.c -O2
87469 + */
87470 +
87471 +#include "gcc-plugin.h"
87472 +#include "config.h"
87473 +#include "system.h"
87474 +#include "coretypes.h"
87475 +#include "tree.h"
87476 +#include "tree-pass.h"
87477 +#include "flags.h"
87478 +#include "intl.h"
87479 +#include "toplev.h"
87480 +#include "plugin.h"
87481 +#include "diagnostic.h"
87482 +#include "plugin-version.h"
87483 +#include "tm.h"
87484 +#include "function.h"
87485 +#include "basic-block.h"
87486 +#include "gimple.h"
87487 +#include "rtl.h"
87488 +#include "emit-rtl.h"
87489 +#include "tree-flow.h"
87490 +
87491 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
87492 +
87493 +int plugin_is_GPL_compatible;
87494 +
87495 +static struct plugin_info const_plugin_info = {
87496 + .version = "201111150100",
87497 + .help = "no-constify\tturn off constification\n",
87498 +};
87499 +
87500 +static void constify_type(tree type);
87501 +static bool walk_struct(tree node);
87502 +
87503 +static tree deconstify_type(tree old_type)
87504 +{
87505 + tree new_type, field;
87506 +
87507 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
87508 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
87509 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
87510 + DECL_FIELD_CONTEXT(field) = new_type;
87511 + TYPE_READONLY(new_type) = 0;
87512 + C_TYPE_FIELDS_READONLY(new_type) = 0;
87513 + return new_type;
87514 +}
87515 +
87516 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87517 +{
87518 + tree type;
87519 +
87520 + *no_add_attrs = true;
87521 + if (TREE_CODE(*node) == FUNCTION_DECL) {
87522 + error("%qE attribute does not apply to functions", name);
87523 + return NULL_TREE;
87524 + }
87525 +
87526 + if (TREE_CODE(*node) == VAR_DECL) {
87527 + error("%qE attribute does not apply to variables", name);
87528 + return NULL_TREE;
87529 + }
87530 +
87531 + if (TYPE_P(*node)) {
87532 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
87533 + *no_add_attrs = false;
87534 + else
87535 + error("%qE attribute applies to struct and union types only", name);
87536 + return NULL_TREE;
87537 + }
87538 +
87539 + type = TREE_TYPE(*node);
87540 +
87541 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
87542 + error("%qE attribute applies to struct and union types only", name);
87543 + return NULL_TREE;
87544 + }
87545 +
87546 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
87547 + error("%qE attribute is already applied to the type", name);
87548 + return NULL_TREE;
87549 + }
87550 +
87551 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
87552 + error("%qE attribute used on type that is not constified", name);
87553 + return NULL_TREE;
87554 + }
87555 +
87556 + if (TREE_CODE(*node) == TYPE_DECL) {
87557 + TREE_TYPE(*node) = deconstify_type(type);
87558 + TREE_READONLY(*node) = 0;
87559 + return NULL_TREE;
87560 + }
87561 +
87562 + return NULL_TREE;
87563 +}
87564 +
87565 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87566 +{
87567 + *no_add_attrs = true;
87568 + if (!TYPE_P(*node)) {
87569 + error("%qE attribute applies to types only", name);
87570 + return NULL_TREE;
87571 + }
87572 +
87573 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
87574 + error("%qE attribute applies to struct and union types only", name);
87575 + return NULL_TREE;
87576 + }
87577 +
87578 + *no_add_attrs = false;
87579 + constify_type(*node);
87580 + return NULL_TREE;
87581 +}
87582 +
87583 +static struct attribute_spec no_const_attr = {
87584 + .name = "no_const",
87585 + .min_length = 0,
87586 + .max_length = 0,
87587 + .decl_required = false,
87588 + .type_required = false,
87589 + .function_type_required = false,
87590 + .handler = handle_no_const_attribute,
87591 +#if BUILDING_GCC_VERSION >= 4007
87592 + .affects_type_identity = true
87593 +#endif
87594 +};
87595 +
87596 +static struct attribute_spec do_const_attr = {
87597 + .name = "do_const",
87598 + .min_length = 0,
87599 + .max_length = 0,
87600 + .decl_required = false,
87601 + .type_required = false,
87602 + .function_type_required = false,
87603 + .handler = handle_do_const_attribute,
87604 +#if BUILDING_GCC_VERSION >= 4007
87605 + .affects_type_identity = true
87606 +#endif
87607 +};
87608 +
87609 +static void register_attributes(void *event_data, void *data)
87610 +{
87611 + register_attribute(&no_const_attr);
87612 + register_attribute(&do_const_attr);
87613 +}
87614 +
87615 +static void constify_type(tree type)
87616 +{
87617 + TYPE_READONLY(type) = 1;
87618 + C_TYPE_FIELDS_READONLY(type) = 1;
87619 +}
87620 +
87621 +static bool is_fptr(tree field)
87622 +{
87623 + tree ptr = TREE_TYPE(field);
87624 +
87625 + if (TREE_CODE(ptr) != POINTER_TYPE)
87626 + return false;
87627 +
87628 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
87629 +}
87630 +
87631 +static bool walk_struct(tree node)
87632 +{
87633 + tree field;
87634 +
87635 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
87636 + return false;
87637 +
87638 + if (TYPE_FIELDS(node) == NULL_TREE)
87639 + return false;
87640 +
87641 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
87642 + tree type = TREE_TYPE(field);
87643 + enum tree_code code = TREE_CODE(type);
87644 + if (code == RECORD_TYPE || code == UNION_TYPE) {
87645 + if (!(walk_struct(type)))
87646 + return false;
87647 + } else if (!is_fptr(field) && !TREE_READONLY(field))
87648 + return false;
87649 + }
87650 + return true;
87651 +}
87652 +
87653 +static void finish_type(void *event_data, void *data)
87654 +{
87655 + tree type = (tree)event_data;
87656 +
87657 + if (type == NULL_TREE)
87658 + return;
87659 +
87660 + if (TYPE_READONLY(type))
87661 + return;
87662 +
87663 + if (walk_struct(type))
87664 + constify_type(type);
87665 +}
87666 +
87667 +static unsigned int check_local_variables(void);
87668 +
87669 +struct gimple_opt_pass pass_local_variable = {
87670 + {
87671 + .type = GIMPLE_PASS,
87672 + .name = "check_local_variables",
87673 + .gate = NULL,
87674 + .execute = check_local_variables,
87675 + .sub = NULL,
87676 + .next = NULL,
87677 + .static_pass_number = 0,
87678 + .tv_id = TV_NONE,
87679 + .properties_required = 0,
87680 + .properties_provided = 0,
87681 + .properties_destroyed = 0,
87682 + .todo_flags_start = 0,
87683 + .todo_flags_finish = 0
87684 + }
87685 +};
87686 +
87687 +static unsigned int check_local_variables(void)
87688 +{
87689 + tree var;
87690 + referenced_var_iterator rvi;
87691 +
87692 +#if BUILDING_GCC_VERSION == 4005
87693 + FOR_EACH_REFERENCED_VAR(var, rvi) {
87694 +#else
87695 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
87696 +#endif
87697 + tree type = TREE_TYPE(var);
87698 +
87699 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
87700 + continue;
87701 +
87702 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
87703 + continue;
87704 +
87705 + if (!TYPE_READONLY(type))
87706 + continue;
87707 +
87708 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
87709 +// continue;
87710 +
87711 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
87712 +// continue;
87713 +
87714 + if (walk_struct(type)) {
87715 + error("constified variable %qE cannot be local", var);
87716 + return 1;
87717 + }
87718 + }
87719 + return 0;
87720 +}
87721 +
87722 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87723 +{
87724 + const char * const plugin_name = plugin_info->base_name;
87725 + const int argc = plugin_info->argc;
87726 + const struct plugin_argument * const argv = plugin_info->argv;
87727 + int i;
87728 + bool constify = true;
87729 +
87730 + struct register_pass_info local_variable_pass_info = {
87731 + .pass = &pass_local_variable.pass,
87732 + .reference_pass_name = "*referenced_vars",
87733 + .ref_pass_instance_number = 0,
87734 + .pos_op = PASS_POS_INSERT_AFTER
87735 + };
87736 +
87737 + if (!plugin_default_version_check(version, &gcc_version)) {
87738 + error(G_("incompatible gcc/plugin versions"));
87739 + return 1;
87740 + }
87741 +
87742 + for (i = 0; i < argc; ++i) {
87743 + if (!(strcmp(argv[i].key, "no-constify"))) {
87744 + constify = false;
87745 + continue;
87746 + }
87747 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87748 + }
87749 +
87750 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
87751 + if (constify) {
87752 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
87753 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
87754 + }
87755 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
87756 +
87757 + return 0;
87758 +}
87759 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
87760 new file mode 100644
87761 index 0000000..a5eabce
87762 --- /dev/null
87763 +++ b/tools/gcc/kallocstat_plugin.c
87764 @@ -0,0 +1,167 @@
87765 +/*
87766 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87767 + * Licensed under the GPL v2
87768 + *
87769 + * Note: the choice of the license means that the compilation process is
87770 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87771 + * but for the kernel it doesn't matter since it doesn't link against
87772 + * any of the gcc libraries
87773 + *
87774 + * gcc plugin to find the distribution of k*alloc sizes
87775 + *
87776 + * TODO:
87777 + *
87778 + * BUGS:
87779 + * - none known
87780 + */
87781 +#include "gcc-plugin.h"
87782 +#include "config.h"
87783 +#include "system.h"
87784 +#include "coretypes.h"
87785 +#include "tree.h"
87786 +#include "tree-pass.h"
87787 +#include "flags.h"
87788 +#include "intl.h"
87789 +#include "toplev.h"
87790 +#include "plugin.h"
87791 +//#include "expr.h" where are you...
87792 +#include "diagnostic.h"
87793 +#include "plugin-version.h"
87794 +#include "tm.h"
87795 +#include "function.h"
87796 +#include "basic-block.h"
87797 +#include "gimple.h"
87798 +#include "rtl.h"
87799 +#include "emit-rtl.h"
87800 +
87801 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87802 +
87803 +int plugin_is_GPL_compatible;
87804 +
87805 +static const char * const kalloc_functions[] = {
87806 + "__kmalloc",
87807 + "kmalloc",
87808 + "kmalloc_large",
87809 + "kmalloc_node",
87810 + "kmalloc_order",
87811 + "kmalloc_order_trace",
87812 + "kmalloc_slab",
87813 + "kzalloc",
87814 + "kzalloc_node",
87815 +};
87816 +
87817 +static struct plugin_info kallocstat_plugin_info = {
87818 + .version = "201111150100",
87819 +};
87820 +
87821 +static unsigned int execute_kallocstat(void);
87822 +
87823 +static struct gimple_opt_pass kallocstat_pass = {
87824 + .pass = {
87825 + .type = GIMPLE_PASS,
87826 + .name = "kallocstat",
87827 + .gate = NULL,
87828 + .execute = execute_kallocstat,
87829 + .sub = NULL,
87830 + .next = NULL,
87831 + .static_pass_number = 0,
87832 + .tv_id = TV_NONE,
87833 + .properties_required = 0,
87834 + .properties_provided = 0,
87835 + .properties_destroyed = 0,
87836 + .todo_flags_start = 0,
87837 + .todo_flags_finish = 0
87838 + }
87839 +};
87840 +
87841 +static bool is_kalloc(const char *fnname)
87842 +{
87843 + size_t i;
87844 +
87845 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
87846 + if (!strcmp(fnname, kalloc_functions[i]))
87847 + return true;
87848 + return false;
87849 +}
87850 +
87851 +static unsigned int execute_kallocstat(void)
87852 +{
87853 + basic_block bb;
87854 +
87855 + // 1. loop through BBs and GIMPLE statements
87856 + FOR_EACH_BB(bb) {
87857 + gimple_stmt_iterator gsi;
87858 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87859 + // gimple match:
87860 + tree fndecl, size;
87861 + gimple call_stmt;
87862 + const char *fnname;
87863 +
87864 + // is it a call
87865 + call_stmt = gsi_stmt(gsi);
87866 + if (!is_gimple_call(call_stmt))
87867 + continue;
87868 + fndecl = gimple_call_fndecl(call_stmt);
87869 + if (fndecl == NULL_TREE)
87870 + continue;
87871 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
87872 + continue;
87873 +
87874 + // is it a call to k*alloc
87875 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
87876 + if (!is_kalloc(fnname))
87877 + continue;
87878 +
87879 + // is the size arg the result of a simple const assignment
87880 + size = gimple_call_arg(call_stmt, 0);
87881 + while (true) {
87882 + gimple def_stmt;
87883 + expanded_location xloc;
87884 + size_t size_val;
87885 +
87886 + if (TREE_CODE(size) != SSA_NAME)
87887 + break;
87888 + def_stmt = SSA_NAME_DEF_STMT(size);
87889 + if (!def_stmt || !is_gimple_assign(def_stmt))
87890 + break;
87891 + if (gimple_num_ops(def_stmt) != 2)
87892 + break;
87893 + size = gimple_assign_rhs1(def_stmt);
87894 + if (!TREE_CONSTANT(size))
87895 + continue;
87896 + xloc = expand_location(gimple_location(def_stmt));
87897 + if (!xloc.file)
87898 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
87899 + size_val = TREE_INT_CST_LOW(size);
87900 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
87901 + break;
87902 + }
87903 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87904 +//debug_tree(gimple_call_fn(call_stmt));
87905 +//print_node(stderr, "pax", fndecl, 4);
87906 + }
87907 + }
87908 +
87909 + return 0;
87910 +}
87911 +
87912 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87913 +{
87914 + const char * const plugin_name = plugin_info->base_name;
87915 + struct register_pass_info kallocstat_pass_info = {
87916 + .pass = &kallocstat_pass.pass,
87917 + .reference_pass_name = "ssa",
87918 + .ref_pass_instance_number = 0,
87919 + .pos_op = PASS_POS_INSERT_AFTER
87920 + };
87921 +
87922 + if (!plugin_default_version_check(version, &gcc_version)) {
87923 + error(G_("incompatible gcc/plugin versions"));
87924 + return 1;
87925 + }
87926 +
87927 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
87928 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
87929 +
87930 + return 0;
87931 +}
87932 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
87933 new file mode 100644
87934 index 0000000..008f159
87935 --- /dev/null
87936 +++ b/tools/gcc/kernexec_plugin.c
87937 @@ -0,0 +1,427 @@
87938 +/*
87939 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87940 + * Licensed under the GPL v2
87941 + *
87942 + * Note: the choice of the license means that the compilation process is
87943 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87944 + * but for the kernel it doesn't matter since it doesn't link against
87945 + * any of the gcc libraries
87946 + *
87947 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
87948 + *
87949 + * TODO:
87950 + *
87951 + * BUGS:
87952 + * - none known
87953 + */
87954 +#include "gcc-plugin.h"
87955 +#include "config.h"
87956 +#include "system.h"
87957 +#include "coretypes.h"
87958 +#include "tree.h"
87959 +#include "tree-pass.h"
87960 +#include "flags.h"
87961 +#include "intl.h"
87962 +#include "toplev.h"
87963 +#include "plugin.h"
87964 +//#include "expr.h" where are you...
87965 +#include "diagnostic.h"
87966 +#include "plugin-version.h"
87967 +#include "tm.h"
87968 +#include "function.h"
87969 +#include "basic-block.h"
87970 +#include "gimple.h"
87971 +#include "rtl.h"
87972 +#include "emit-rtl.h"
87973 +#include "tree-flow.h"
87974 +
87975 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87976 +extern rtx emit_move_insn(rtx x, rtx y);
87977 +
87978 +int plugin_is_GPL_compatible;
87979 +
87980 +static struct plugin_info kernexec_plugin_info = {
87981 + .version = "201111291120",
87982 + .help = "method=[bts|or]\tinstrumentation method\n"
87983 +};
87984 +
87985 +static unsigned int execute_kernexec_reload(void);
87986 +static unsigned int execute_kernexec_fptr(void);
87987 +static unsigned int execute_kernexec_retaddr(void);
87988 +static bool kernexec_cmodel_check(void);
87989 +
87990 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
87991 +static void (*kernexec_instrument_retaddr)(rtx);
87992 +
87993 +static struct gimple_opt_pass kernexec_reload_pass = {
87994 + .pass = {
87995 + .type = GIMPLE_PASS,
87996 + .name = "kernexec_reload",
87997 + .gate = kernexec_cmodel_check,
87998 + .execute = execute_kernexec_reload,
87999 + .sub = NULL,
88000 + .next = NULL,
88001 + .static_pass_number = 0,
88002 + .tv_id = TV_NONE,
88003 + .properties_required = 0,
88004 + .properties_provided = 0,
88005 + .properties_destroyed = 0,
88006 + .todo_flags_start = 0,
88007 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
88008 + }
88009 +};
88010 +
88011 +static struct gimple_opt_pass kernexec_fptr_pass = {
88012 + .pass = {
88013 + .type = GIMPLE_PASS,
88014 + .name = "kernexec_fptr",
88015 + .gate = kernexec_cmodel_check,
88016 + .execute = execute_kernexec_fptr,
88017 + .sub = NULL,
88018 + .next = NULL,
88019 + .static_pass_number = 0,
88020 + .tv_id = TV_NONE,
88021 + .properties_required = 0,
88022 + .properties_provided = 0,
88023 + .properties_destroyed = 0,
88024 + .todo_flags_start = 0,
88025 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
88026 + }
88027 +};
88028 +
88029 +static struct rtl_opt_pass kernexec_retaddr_pass = {
88030 + .pass = {
88031 + .type = RTL_PASS,
88032 + .name = "kernexec_retaddr",
88033 + .gate = kernexec_cmodel_check,
88034 + .execute = execute_kernexec_retaddr,
88035 + .sub = NULL,
88036 + .next = NULL,
88037 + .static_pass_number = 0,
88038 + .tv_id = TV_NONE,
88039 + .properties_required = 0,
88040 + .properties_provided = 0,
88041 + .properties_destroyed = 0,
88042 + .todo_flags_start = 0,
88043 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
88044 + }
88045 +};
88046 +
88047 +static bool kernexec_cmodel_check(void)
88048 +{
88049 + tree section;
88050 +
88051 + if (ix86_cmodel != CM_KERNEL)
88052 + return false;
88053 +
88054 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
88055 + if (!section || !TREE_VALUE(section))
88056 + return true;
88057 +
88058 + section = TREE_VALUE(TREE_VALUE(section));
88059 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
88060 + return true;
88061 +
88062 + return false;
88063 +}
88064 +
88065 +/*
88066 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
88067 + */
88068 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
88069 +{
88070 + gimple asm_movabs_stmt;
88071 +
88072 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
88073 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
88074 + gimple_asm_set_volatile(asm_movabs_stmt, true);
88075 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
88076 + update_stmt(asm_movabs_stmt);
88077 +}
88078 +
88079 +/*
88080 + * find all asm() stmts that clobber r10 and add a reload of r10
88081 + */
88082 +static unsigned int execute_kernexec_reload(void)
88083 +{
88084 + basic_block bb;
88085 +
88086 + // 1. loop through BBs and GIMPLE statements
88087 + FOR_EACH_BB(bb) {
88088 + gimple_stmt_iterator gsi;
88089 +
88090 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88091 + // gimple match: __asm__ ("" : : : "r10");
88092 + gimple asm_stmt;
88093 + size_t nclobbers;
88094 +
88095 + // is it an asm ...
88096 + asm_stmt = gsi_stmt(gsi);
88097 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
88098 + continue;
88099 +
88100 + // ... clobbering r10
88101 + nclobbers = gimple_asm_nclobbers(asm_stmt);
88102 + while (nclobbers--) {
88103 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
88104 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
88105 + continue;
88106 + kernexec_reload_fptr_mask(&gsi);
88107 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
88108 + break;
88109 + }
88110 + }
88111 + }
88112 +
88113 + return 0;
88114 +}
88115 +
88116 +/*
88117 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
88118 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
88119 + */
88120 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
88121 +{
88122 + gimple assign_intptr, assign_new_fptr, call_stmt;
88123 + tree intptr, old_fptr, new_fptr, kernexec_mask;
88124 +
88125 + call_stmt = gsi_stmt(*gsi);
88126 + old_fptr = gimple_call_fn(call_stmt);
88127 +
88128 + // create temporary unsigned long variable used for bitops and cast fptr to it
88129 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
88130 + add_referenced_var(intptr);
88131 + mark_sym_for_renaming(intptr);
88132 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
88133 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
88134 + update_stmt(assign_intptr);
88135 +
88136 + // apply logical or to temporary unsigned long and bitmask
88137 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
88138 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
88139 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
88140 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
88141 + update_stmt(assign_intptr);
88142 +
88143 + // cast temporary unsigned long back to a temporary fptr variable
88144 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
88145 + add_referenced_var(new_fptr);
88146 + mark_sym_for_renaming(new_fptr);
88147 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
88148 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
88149 + update_stmt(assign_new_fptr);
88150 +
88151 + // replace call stmt fn with the new fptr
88152 + gimple_call_set_fn(call_stmt, new_fptr);
88153 + update_stmt(call_stmt);
88154 +}
88155 +
88156 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
88157 +{
88158 + gimple asm_or_stmt, call_stmt;
88159 + tree old_fptr, new_fptr, input, output;
88160 + VEC(tree, gc) *inputs = NULL;
88161 + VEC(tree, gc) *outputs = NULL;
88162 +
88163 + call_stmt = gsi_stmt(*gsi);
88164 + old_fptr = gimple_call_fn(call_stmt);
88165 +
88166 + // create temporary fptr variable
88167 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
88168 + add_referenced_var(new_fptr);
88169 + mark_sym_for_renaming(new_fptr);
88170 +
88171 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
88172 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
88173 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
88174 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
88175 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
88176 + VEC_safe_push(tree, gc, inputs, input);
88177 + VEC_safe_push(tree, gc, outputs, output);
88178 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
88179 + gimple_asm_set_volatile(asm_or_stmt, true);
88180 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
88181 + update_stmt(asm_or_stmt);
88182 +
88183 + // replace call stmt fn with the new fptr
88184 + gimple_call_set_fn(call_stmt, new_fptr);
88185 + update_stmt(call_stmt);
88186 +}
88187 +
88188 +/*
88189 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
88190 + */
88191 +static unsigned int execute_kernexec_fptr(void)
88192 +{
88193 + basic_block bb;
88194 +
88195 + // 1. loop through BBs and GIMPLE statements
88196 + FOR_EACH_BB(bb) {
88197 + gimple_stmt_iterator gsi;
88198 +
88199 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88200 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
88201 + tree fn;
88202 + gimple call_stmt;
88203 +
88204 + // is it a call ...
88205 + call_stmt = gsi_stmt(gsi);
88206 + if (!is_gimple_call(call_stmt))
88207 + continue;
88208 + fn = gimple_call_fn(call_stmt);
88209 + if (TREE_CODE(fn) == ADDR_EXPR)
88210 + continue;
88211 + if (TREE_CODE(fn) != SSA_NAME)
88212 + gcc_unreachable();
88213 +
88214 + // ... through a function pointer
88215 + fn = SSA_NAME_VAR(fn);
88216 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
88217 + continue;
88218 + fn = TREE_TYPE(fn);
88219 + if (TREE_CODE(fn) != POINTER_TYPE)
88220 + continue;
88221 + fn = TREE_TYPE(fn);
88222 + if (TREE_CODE(fn) != FUNCTION_TYPE)
88223 + continue;
88224 +
88225 + kernexec_instrument_fptr(&gsi);
88226 +
88227 +//debug_tree(gimple_call_fn(call_stmt));
88228 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
88229 + }
88230 + }
88231 +
88232 + return 0;
88233 +}
88234 +
88235 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
88236 +static void kernexec_instrument_retaddr_bts(rtx insn)
88237 +{
88238 + rtx btsq;
88239 + rtvec argvec, constraintvec, labelvec;
88240 + int line;
88241 +
88242 + // create asm volatile("btsq $63,(%%rsp)":::)
88243 + argvec = rtvec_alloc(0);
88244 + constraintvec = rtvec_alloc(0);
88245 + labelvec = rtvec_alloc(0);
88246 + line = expand_location(RTL_LOCATION(insn)).line;
88247 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88248 + MEM_VOLATILE_P(btsq) = 1;
88249 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
88250 + emit_insn_before(btsq, insn);
88251 +}
88252 +
88253 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
88254 +static void kernexec_instrument_retaddr_or(rtx insn)
88255 +{
88256 + rtx orq;
88257 + rtvec argvec, constraintvec, labelvec;
88258 + int line;
88259 +
88260 + // create asm volatile("orq %%r10,(%%rsp)":::)
88261 + argvec = rtvec_alloc(0);
88262 + constraintvec = rtvec_alloc(0);
88263 + labelvec = rtvec_alloc(0);
88264 + line = expand_location(RTL_LOCATION(insn)).line;
88265 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88266 + MEM_VOLATILE_P(orq) = 1;
88267 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
88268 + emit_insn_before(orq, insn);
88269 +}
88270 +
88271 +/*
88272 + * find all asm level function returns and forcibly set the highest bit of the return address
88273 + */
88274 +static unsigned int execute_kernexec_retaddr(void)
88275 +{
88276 + rtx insn;
88277 +
88278 + // 1. find function returns
88279 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88280 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
88281 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
88282 + rtx body;
88283 +
88284 + // is it a retn
88285 + if (!JUMP_P(insn))
88286 + continue;
88287 + body = PATTERN(insn);
88288 + if (GET_CODE(body) == PARALLEL)
88289 + body = XVECEXP(body, 0, 0);
88290 + if (GET_CODE(body) != RETURN)
88291 + continue;
88292 + kernexec_instrument_retaddr(insn);
88293 + }
88294 +
88295 +// print_simple_rtl(stderr, get_insns());
88296 +// print_rtl(stderr, get_insns());
88297 +
88298 + return 0;
88299 +}
88300 +
88301 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88302 +{
88303 + const char * const plugin_name = plugin_info->base_name;
88304 + const int argc = plugin_info->argc;
88305 + const struct plugin_argument * const argv = plugin_info->argv;
88306 + int i;
88307 + struct register_pass_info kernexec_reload_pass_info = {
88308 + .pass = &kernexec_reload_pass.pass,
88309 + .reference_pass_name = "ssa",
88310 + .ref_pass_instance_number = 0,
88311 + .pos_op = PASS_POS_INSERT_AFTER
88312 + };
88313 + struct register_pass_info kernexec_fptr_pass_info = {
88314 + .pass = &kernexec_fptr_pass.pass,
88315 + .reference_pass_name = "ssa",
88316 + .ref_pass_instance_number = 0,
88317 + .pos_op = PASS_POS_INSERT_AFTER
88318 + };
88319 + struct register_pass_info kernexec_retaddr_pass_info = {
88320 + .pass = &kernexec_retaddr_pass.pass,
88321 + .reference_pass_name = "pro_and_epilogue",
88322 + .ref_pass_instance_number = 0,
88323 + .pos_op = PASS_POS_INSERT_AFTER
88324 + };
88325 +
88326 + if (!plugin_default_version_check(version, &gcc_version)) {
88327 + error(G_("incompatible gcc/plugin versions"));
88328 + return 1;
88329 + }
88330 +
88331 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
88332 +
88333 + if (TARGET_64BIT == 0)
88334 + return 0;
88335 +
88336 + for (i = 0; i < argc; ++i) {
88337 + if (!strcmp(argv[i].key, "method")) {
88338 + if (!argv[i].value) {
88339 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88340 + continue;
88341 + }
88342 + if (!strcmp(argv[i].value, "bts")) {
88343 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
88344 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
88345 + } else if (!strcmp(argv[i].value, "or")) {
88346 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
88347 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
88348 + fix_register("r10", 1, 1);
88349 + } else
88350 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88351 + continue;
88352 + }
88353 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88354 + }
88355 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
88356 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
88357 +
88358 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
88359 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
88360 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
88361 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
88362 +
88363 + return 0;
88364 +}
88365 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
88366 new file mode 100644
88367 index 0000000..4a9b187
88368 --- /dev/null
88369 +++ b/tools/gcc/stackleak_plugin.c
88370 @@ -0,0 +1,326 @@
88371 +/*
88372 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88373 + * Licensed under the GPL v2
88374 + *
88375 + * Note: the choice of the license means that the compilation process is
88376 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88377 + * but for the kernel it doesn't matter since it doesn't link against
88378 + * any of the gcc libraries
88379 + *
88380 + * gcc plugin to help implement various PaX features
88381 + *
88382 + * - track lowest stack pointer
88383 + *
88384 + * TODO:
88385 + * - initialize all local variables
88386 + *
88387 + * BUGS:
88388 + * - none known
88389 + */
88390 +#include "gcc-plugin.h"
88391 +#include "config.h"
88392 +#include "system.h"
88393 +#include "coretypes.h"
88394 +#include "tree.h"
88395 +#include "tree-pass.h"
88396 +#include "flags.h"
88397 +#include "intl.h"
88398 +#include "toplev.h"
88399 +#include "plugin.h"
88400 +//#include "expr.h" where are you...
88401 +#include "diagnostic.h"
88402 +#include "plugin-version.h"
88403 +#include "tm.h"
88404 +#include "function.h"
88405 +#include "basic-block.h"
88406 +#include "gimple.h"
88407 +#include "rtl.h"
88408 +#include "emit-rtl.h"
88409 +
88410 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88411 +
88412 +int plugin_is_GPL_compatible;
88413 +
88414 +static int track_frame_size = -1;
88415 +static const char track_function[] = "pax_track_stack";
88416 +static const char check_function[] = "pax_check_alloca";
88417 +static tree pax_check_alloca_decl;
88418 +static tree pax_track_stack_decl;
88419 +static bool init_locals;
88420 +
88421 +static struct plugin_info stackleak_plugin_info = {
88422 + .version = "201203021600",
88423 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
88424 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
88425 +};
88426 +
88427 +static bool gate_stackleak_track_stack(void);
88428 +static unsigned int execute_stackleak_tree_instrument(void);
88429 +static unsigned int execute_stackleak_final(void);
88430 +
88431 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
88432 + .pass = {
88433 + .type = GIMPLE_PASS,
88434 + .name = "stackleak_tree_instrument",
88435 + .gate = gate_stackleak_track_stack,
88436 + .execute = execute_stackleak_tree_instrument,
88437 + .sub = NULL,
88438 + .next = NULL,
88439 + .static_pass_number = 0,
88440 + .tv_id = TV_NONE,
88441 + .properties_required = PROP_gimple_leh | PROP_cfg,
88442 + .properties_provided = 0,
88443 + .properties_destroyed = 0,
88444 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
88445 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
88446 + }
88447 +};
88448 +
88449 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
88450 + .pass = {
88451 + .type = RTL_PASS,
88452 + .name = "stackleak_final",
88453 + .gate = gate_stackleak_track_stack,
88454 + .execute = execute_stackleak_final,
88455 + .sub = NULL,
88456 + .next = NULL,
88457 + .static_pass_number = 0,
88458 + .tv_id = TV_NONE,
88459 + .properties_required = 0,
88460 + .properties_provided = 0,
88461 + .properties_destroyed = 0,
88462 + .todo_flags_start = 0,
88463 + .todo_flags_finish = TODO_dump_func
88464 + }
88465 +};
88466 +
88467 +static bool gate_stackleak_track_stack(void)
88468 +{
88469 + return track_frame_size >= 0;
88470 +}
88471 +
88472 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
88473 +{
88474 + gimple check_alloca;
88475 + tree alloca_size;
88476 +
88477 + // insert call to void pax_check_alloca(unsigned long size)
88478 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
88479 + check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
88480 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
88481 +}
88482 +
88483 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
88484 +{
88485 + gimple track_stack;
88486 +
88487 + // insert call to void pax_track_stack(void)
88488 + track_stack = gimple_build_call(pax_track_stack_decl, 0);
88489 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
88490 +}
88491 +
88492 +#if BUILDING_GCC_VERSION == 4005
88493 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
88494 +{
88495 + tree fndecl;
88496 +
88497 + if (!is_gimple_call(stmt))
88498 + return false;
88499 + fndecl = gimple_call_fndecl(stmt);
88500 + if (!fndecl)
88501 + return false;
88502 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
88503 + return false;
88504 +// print_node(stderr, "pax", fndecl, 4);
88505 + return DECL_FUNCTION_CODE(fndecl) == code;
88506 +}
88507 +#endif
88508 +
88509 +static bool is_alloca(gimple stmt)
88510 +{
88511 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
88512 + return true;
88513 +
88514 +#if BUILDING_GCC_VERSION >= 4007
88515 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
88516 + return true;
88517 +#endif
88518 +
88519 + return false;
88520 +}
88521 +
88522 +static unsigned int execute_stackleak_tree_instrument(void)
88523 +{
88524 + basic_block bb, entry_bb;
88525 + bool prologue_instrumented = false, is_leaf = true;
88526 +
88527 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
88528 +
88529 + // 1. loop through BBs and GIMPLE statements
88530 + FOR_EACH_BB(bb) {
88531 + gimple_stmt_iterator gsi;
88532 +
88533 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88534 + gimple stmt;
88535 +
88536 + stmt = gsi_stmt(gsi);
88537 +
88538 + if (is_gimple_call(stmt))
88539 + is_leaf = false;
88540 +
88541 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
88542 + if (!is_alloca(stmt))
88543 + continue;
88544 +
88545 + // 2. insert stack overflow check before each __builtin_alloca call
88546 + stackleak_check_alloca(&gsi);
88547 +
88548 + // 3. insert track call after each __builtin_alloca call
88549 + stackleak_add_instrumentation(&gsi);
88550 + if (bb == entry_bb)
88551 + prologue_instrumented = true;
88552 + }
88553 + }
88554 +
88555 + // special case for some bad linux code: taking the address of static inline functions will materialize them
88556 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
88557 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
88558 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
88559 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
88560 + return 0;
88561 +
88562 + // 4. insert track call at the beginning
88563 + if (!prologue_instrumented) {
88564 + gimple_stmt_iterator gsi;
88565 +
88566 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
88567 + if (dom_info_available_p(CDI_DOMINATORS))
88568 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
88569 + gsi = gsi_start_bb(bb);
88570 + stackleak_add_instrumentation(&gsi);
88571 + }
88572 +
88573 + return 0;
88574 +}
88575 +
88576 +static unsigned int execute_stackleak_final(void)
88577 +{
88578 + rtx insn;
88579 +
88580 + if (cfun->calls_alloca)
88581 + return 0;
88582 +
88583 + // keep calls only if function frame is big enough
88584 + if (get_frame_size() >= track_frame_size)
88585 + return 0;
88586 +
88587 + // 1. find pax_track_stack calls
88588 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88589 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
88590 + rtx body;
88591 +
88592 + if (!CALL_P(insn))
88593 + continue;
88594 + body = PATTERN(insn);
88595 + if (GET_CODE(body) != CALL)
88596 + continue;
88597 + body = XEXP(body, 0);
88598 + if (GET_CODE(body) != MEM)
88599 + continue;
88600 + body = XEXP(body, 0);
88601 + if (GET_CODE(body) != SYMBOL_REF)
88602 + continue;
88603 + if (strcmp(XSTR(body, 0), track_function))
88604 + continue;
88605 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88606 + // 2. delete call
88607 + insn = delete_insn_and_edges(insn);
88608 +#if BUILDING_GCC_VERSION >= 4007
88609 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
88610 + insn = delete_insn_and_edges(insn);
88611 +#endif
88612 + }
88613 +
88614 +// print_simple_rtl(stderr, get_insns());
88615 +// print_rtl(stderr, get_insns());
88616 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88617 +
88618 + return 0;
88619 +}
88620 +
88621 +static void stackleak_start_unit(void *gcc_data, void *user_data)
88622 +{
88623 + tree fntype;
88624 +
88625 + // declare void pax_check_alloca(unsigned long size)
88626 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
88627 + pax_check_alloca_decl = build_fn_decl(check_function, fntype);
88628 + DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
88629 + TREE_PUBLIC(pax_check_alloca_decl) = 1;
88630 + DECL_EXTERNAL(pax_check_alloca_decl) = 1;
88631 + DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
88632 +
88633 + // declare void pax_track_stack(void)
88634 + fntype = build_function_type_list(void_type_node, NULL_TREE);
88635 + pax_track_stack_decl = build_fn_decl(track_function, fntype);
88636 + DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
88637 + TREE_PUBLIC(pax_track_stack_decl) = 1;
88638 + DECL_EXTERNAL(pax_track_stack_decl) = 1;
88639 + DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
88640 +}
88641 +
88642 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88643 +{
88644 + const char * const plugin_name = plugin_info->base_name;
88645 + const int argc = plugin_info->argc;
88646 + const struct plugin_argument * const argv = plugin_info->argv;
88647 + int i;
88648 + struct register_pass_info stackleak_tree_instrument_pass_info = {
88649 + .pass = &stackleak_tree_instrument_pass.pass,
88650 +// .reference_pass_name = "tree_profile",
88651 + .reference_pass_name = "optimized",
88652 + .ref_pass_instance_number = 0,
88653 + .pos_op = PASS_POS_INSERT_BEFORE
88654 + };
88655 + struct register_pass_info stackleak_final_pass_info = {
88656 + .pass = &stackleak_final_rtl_opt_pass.pass,
88657 + .reference_pass_name = "final",
88658 + .ref_pass_instance_number = 0,
88659 + .pos_op = PASS_POS_INSERT_BEFORE
88660 + };
88661 +
88662 + if (!plugin_default_version_check(version, &gcc_version)) {
88663 + error(G_("incompatible gcc/plugin versions"));
88664 + return 1;
88665 + }
88666 +
88667 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
88668 +
88669 + for (i = 0; i < argc; ++i) {
88670 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
88671 + if (!argv[i].value) {
88672 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88673 + continue;
88674 + }
88675 + track_frame_size = atoi(argv[i].value);
88676 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
88677 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88678 + continue;
88679 + }
88680 + if (!strcmp(argv[i].key, "initialize-locals")) {
88681 + if (argv[i].value) {
88682 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88683 + continue;
88684 + }
88685 + init_locals = true;
88686 + continue;
88687 + }
88688 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88689 + }
88690 +
88691 + register_callback("start_unit", PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
88692 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
88693 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
88694 +
88695 + return 0;
88696 +}
88697 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
88698 index 83b3dde..835bee7 100644
88699 --- a/usr/gen_init_cpio.c
88700 +++ b/usr/gen_init_cpio.c
88701 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
88702 int retval;
88703 int rc = -1;
88704 int namesize;
88705 - int i;
88706 + unsigned int i;
88707
88708 mode |= S_IFREG;
88709
88710 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
88711 *env_var = *expanded = '\0';
88712 strncat(env_var, start + 2, end - start - 2);
88713 strncat(expanded, new_location, start - new_location);
88714 - strncat(expanded, getenv(env_var), PATH_MAX);
88715 - strncat(expanded, end + 1, PATH_MAX);
88716 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
88717 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
88718 strncpy(new_location, expanded, PATH_MAX);
88719 + new_location[PATH_MAX] = 0;
88720 } else
88721 break;
88722 }
88723 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
88724 index 4f3434f..fc63040 100644
88725 --- a/virt/kvm/kvm_main.c
88726 +++ b/virt/kvm/kvm_main.c
88727 @@ -43,6 +43,8 @@
88728 #include <linux/swap.h>
88729 #include <linux/bitops.h>
88730 #include <linux/spinlock.h>
88731 +#include <linux/namei.h>
88732 +#include <linux/fs.h>
88733
88734 #include <asm/processor.h>
88735 #include <asm/io.h>
88736 @@ -575,12 +577,73 @@ out:
88737 return r;
88738 }
88739
88740 +/*
88741 + * We want to test whether the caller has been granted permissions to
88742 + * use this device. To be able to configure and control the device,
88743 + * the user needs access to PCI configuration space and BAR resources.
88744 + * These are accessed through PCI sysfs. PCI config space is often
88745 + * passed to the process calling this ioctl via file descriptor, so we
88746 + * can't rely on access to that file. We can check for permissions
88747 + * on each of the BAR resource files, which is a pretty clear
88748 + * indicator that the user has been granted access to the device.
88749 + */
88750 +static int probe_sysfs_permissions(struct pci_dev *dev)
88751 +{
88752 +#ifdef CONFIG_SYSFS
88753 + int i;
88754 + bool bar_found = false;
88755 +
88756 + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
88757 + char *kpath, *syspath;
88758 + struct path path;
88759 + struct inode *inode;
88760 + int r;
88761 +
88762 + if (!pci_resource_len(dev, i))
88763 + continue;
88764 +
88765 + kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
88766 + if (!kpath)
88767 + return -ENOMEM;
88768 +
88769 + /* Per sysfs-rules, sysfs is always at /sys */
88770 + syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
88771 + kfree(kpath);
88772 + if (!syspath)
88773 + return -ENOMEM;
88774 +
88775 + r = kern_path(syspath, LOOKUP_FOLLOW, &path);
88776 + kfree(syspath);
88777 + if (r)
88778 + return r;
88779 +
88780 + inode = path.dentry->d_inode;
88781 +
88782 + r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
88783 + path_put(&path);
88784 + if (r)
88785 + return r;
88786 +
88787 + bar_found = true;
88788 + }
88789 +
88790 + /* If no resources, probably something special */
88791 + if (!bar_found)
88792 + return -EPERM;
88793 +
88794 + return 0;
88795 +#else
88796 + return -EINVAL; /* No way to control the device without sysfs */
88797 +#endif
88798 +}
88799 +
88800 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
88801 struct kvm_assigned_pci_dev *assigned_dev)
88802 {
88803 int r = 0;
88804 struct kvm_assigned_dev_kernel *match;
88805 struct pci_dev *dev;
88806 + u8 header_type;
88807
88808 down_read(&kvm->slots_lock);
88809 mutex_lock(&kvm->lock);
88810 @@ -607,6 +670,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
88811 r = -EINVAL;
88812 goto out_free;
88813 }
88814 +
88815 + /* Don't allow bridges to be assigned */
88816 + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
88817 + if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
88818 + r = -EPERM;
88819 + goto out_put;
88820 + }
88821 +
88822 + r = probe_sysfs_permissions(dev);
88823 + if (r)
88824 + goto out_put;
88825 +
88826 if (pci_enable_device(dev)) {
88827 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
88828 r = -EBUSY;
88829 @@ -2494,7 +2569,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
88830 if (kvm_rebooting)
88831 /* spin while reset goes on */
88832 while (true)
88833 - ;
88834 + cpu_relax();
88835 /* Fault while not rebooting. We want the trace. */
88836 BUG();
88837 }
88838 @@ -2714,7 +2789,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
88839 kvm_arch_vcpu_put(vcpu);
88840 }
88841
88842 -int kvm_init(void *opaque, unsigned int vcpu_size,
88843 +int kvm_init(const void *opaque, unsigned int vcpu_size,
88844 struct module *module)
88845 {
88846 int r;
88847 @@ -2767,15 +2842,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
88848 /* A kmem cache lets us meet the alignment requirements of fx_save. */
88849 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
88850 __alignof__(struct kvm_vcpu),
88851 - 0, NULL);
88852 + SLAB_USERCOPY, NULL);
88853 if (!kvm_vcpu_cache) {
88854 r = -ENOMEM;
88855 goto out_free_5;
88856 }
88857
88858 - kvm_chardev_ops.owner = module;
88859 - kvm_vm_fops.owner = module;
88860 - kvm_vcpu_fops.owner = module;
88861 + pax_open_kernel();
88862 + *(void **)&kvm_chardev_ops.owner = module;
88863 + *(void **)&kvm_vm_fops.owner = module;
88864 + *(void **)&kvm_vcpu_fops.owner = module;
88865 + pax_close_kernel();
88866
88867 r = misc_register(&kvm_dev);
88868 if (r) {